path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>   2014-04-02 20:53:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-04-02 20:53:45 -0700
commit     cd6362befe4cc7bf589a5236d2a780af2d47bcc9 (patch)
tree       3bd4e13ec3f92a00dc4f6c3d65e820b54dbfe46e /drivers
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid (diff)
parent     netpoll: Use skb_irq_freeable to make zap_completion_queue safe. (diff)
download   wireguard-linux-cd6362befe4cc7bf589a5236d2a780af2d47bcc9.tar.xz
           wireguard-linux-cd6362befe4cc7bf589a5236d2a780af2d47bcc9.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking updates from David Miller:
 "Here is my initial pull request for the networking subsystem during
  this merge window:

   1) Support for ESN in AH (RFC 4302) from Fan Du.

   2) Add full kernel doc for ethtool command structures, from Ben
      Hutchings.

   3) Add BCM7xxx PHY driver, from Florian Fainelli.

   4) Export computed TCP rate information in netlink socket dumps,
      from Eric Dumazet.

   5) Allow IPSEC SA to be dumped partially using a filter, from
      Nicolas Dichtel.

   6) Convert many drivers to pci_enable_msix_range(), from Alexander
      Gordeev.

   7) Record SKB timestamps more efficiently, from Eric Dumazet.

   8) Switch to microsecond resolution for TCP round trip times, also
      from Eric Dumazet.

   9) Clean up and fix 6lowpan fragmentation handling by making use of
      the existing inet_frag api for its implementation.

  10) Add TX grant mapping to xen-netback driver, from Zoltan Kiss.

  11) Auto size SKB lengths when composing netlink messages based upon
      past message sizes used, from Eric Dumazet.

  12) qdisc dumps can take a long time, add a cond_resched(), from Eric
      Dumazet.

  13) Sanitize netpoll core and drivers wrt. SKB handling semantics.
      Get rid of never-used-in-tree netpoll RX handling.  From Eric W
      Biederman.

  14) Support inter-address-family and namespace changing in VTI tunnel
      driver(s).  From Steffen Klassert.

  15) Add Altera TSE driver, from Vince Bridgers.

  16) Optimize csum_replace2() so that it doesn't adjust the checksum
      by checksumming the entire header, from Eric Dumazet.

  17) Expand BPF internal implementation for faster interpreting, more
      direct translations into JIT'd code, and much cleaner uses of BPF
      filtering in non-socket contexts.  From Daniel Borkmann and
      Alexei Starovoitov"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1976 commits)
  netpoll: Use skb_irq_freeable to make zap_completion_queue safe.
  net: Add a test to see if a skb is freeable in irq context
  qlcnic: Fix build failure due to undefined reference to `vxlan_get_rx_port'
  net: ptp: move PTP classifier in its own file
  net: sxgbe: make "core_ops" static
  net: sxgbe: fix logical vs bitwise operation
  net: sxgbe: sxgbe_mdio_register() frees the bus
  Call efx_set_channels() before efx->type->dimension_resources()
  xen-netback: disable rogue vif in kthread context
  net/mlx4: Set proper build dependancy with vxlan
  be2net: fix build dependency on VxLAN
  mac802154: make csma/cca parameters per-wpan
  mac802154: allow only one WPAN to be up at any given time
  net: filter: minor: fix kdoc in __sk_run_filter
  netlink: don't compare the nul-termination in nla_strcmp
  can: c_can: Avoid led toggling for every packet.
  can: c_can: Simplify TX interrupt cleanup
  can: c_can: Store dlc private
  can: c_can: Reduce register access
  can: c_can: Make the code readable
  ...
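As a rough illustration of the incremental checksum update referred to in item 16 above, the sketch below shows the RFC 1624 arithmetic (HC' = ~(~HC + ~m + m')) for patching a 16-bit Internet checksum when a single 16-bit header field changes, instead of re-summing the whole header. This is a minimal standalone example, not the kernel's actual csum_replace2() implementation, and it ignores the kernel's __sum16/__be16 typing and byte-order handling; the function name and sample values are hypothetical.

```c
/*
 * Illustrative sketch only (NOT the kernel's csum_replace2()):
 * RFC 1624 incremental update of a 16-bit one's-complement checksum
 * when one 16-bit field of the checksummed data changes.
 */
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to a 16-bit one's-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* HC' = ~(~HC + ~m + m')  -- RFC 1624, equation 3. */
static uint16_t csum_replace2_sketch(uint16_t check, uint16_t old_val,
				     uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	uint16_t check = 0xb1e6;	/* arbitrary precomputed checksum */

	/* Patch the checksum after changing one field 0x1234 -> 0xabcd. */
	printf("updated checksum: 0x%04x\n",
	       csum_replace2_sketch(check, 0x1234, 0xabcd));
	return 0;
}
```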
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/firestream.c6
-rw-r--r--drivers/atm/idt77105.c6
-rw-r--r--drivers/atm/nicstar.c24
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/bcma/driver_gpio.c9
-rw-r--r--drivers/bluetooth/ath3k.c97
-rw-r--r--drivers/bluetooth/bfusb.c14
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c7
-rw-r--r--drivers/bluetooth/btmrvl_main.c11
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/btusb.c59
-rw-r--r--drivers/bluetooth/dtl1_cs.c9
-rw-r--r--drivers/bluetooth/hci_bcsp.c31
-rw-r--r--drivers/bluetooth/hci_h5.c10
-rw-r--r--drivers/bluetooth/hci_ldisc.c9
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c24
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c177
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h9
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c45
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c140
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h6
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h5
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c80
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c42
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c122
-rw-r--r--drivers/infiniband/hw/mlx4/main.c31
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h24
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c319
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c5
-rw-r--r--drivers/isdn/act2000/module.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c7
-rw-r--r--drivers/isdn/hisax/elsa.c9
-rw-r--r--drivers/isdn/hisax/elsa_ser.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c7
-rw-r--r--drivers/isdn/i4l/isdn_common.c15
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c61
-rw-r--r--drivers/isdn/pcbit/drv.c6
-rw-r--r--drivers/isdn/sc/init.c4
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/bonding/bond_3ad.c74
-rw-r--r--drivers/net/bonding/bond_3ad.h175
-rw-r--r--drivers/net/bonding/bond_alb.c138
-rw-r--r--drivers/net/bonding/bond_debugfs.c10
-rw-r--r--drivers/net/bonding/bond_main.c466
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c328
-rw-r--r--drivers/net/bonding/bond_options.h62
-rw-r--r--drivers/net/bonding/bond_procfs.c14
-rw-r--r--drivers/net/bonding/bond_sysfs.c24
-rw-r--r--drivers/net/bonding/bonding.h43
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/can/at91_can.c10
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c349
-rw-r--r--drivers/net/can/c_can/c_can.h29
-rw-r--r--drivers/net/can/c_can/c_can_platform.c47
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c183
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/janz-ican3.c65
-rw-r--r--drivers/net/can/mcp251x.c22
-rw-r--r--drivers/net/can/mscan/mscan.c7
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig13
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000.c10
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c220
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c194
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/dummy.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1127
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/lib8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c9
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/Kconfig8
-rw-r--r--drivers/net/ethernet/altera/Makefile7
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c202
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.h34
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h167
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c509
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.h35
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h124
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h486
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c235
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1543
-rw-r--r--drivers/net/ethernet/altera/altera_utils.c44
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h27
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c124
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c34
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c14
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c15
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c385
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c155
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c1876
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h368
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c619
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
-rw-r--r--drivers/net/ethernet/broadcom/genet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2584
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h628
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c464
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c30
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c65
-rw-r--r--drivers/net/ethernet/cadence/macb.c8
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c230
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c172
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c152
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c43
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c24
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h27
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c213
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h97
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c467
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1327
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h114
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c165
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c340
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile7
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h52
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h55
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c427
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c373
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c47
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c370
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c476
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c546
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c434
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c369
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c90
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h16
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h48
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c299
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h75
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c25
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c76
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h36
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c76
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c170
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c64
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c360
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c212
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c45
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c39
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c141
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h33
-rw-r--r--drivers/net/ethernet/jme.c16
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c20
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c267
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c17
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c35
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c57
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c11
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h121
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c91
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c85
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c233
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c25
-rw-r--r--drivers/net/ethernet/rdc/r6040.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c271
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/samsung/Kconfig16
-rw-r--r--drivers/net/ethernet/samsung/Makefile5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Kconfig9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h535
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c262
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c515
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h298
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c382
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c524
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2317
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c244
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c254
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h104
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c259
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h488
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--drivers/net/ethernet/sfc/ef10.c27
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h61
-rw-r--r--drivers/net/ethernet/sfc/efx.c33
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c39
-rw-r--r--drivers/net/ethernet/sfc/falcon.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c5
-rw-r--r--drivers/net/ethernet/sfc/filter.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c93
-rw-r--r--drivers/net/ethernet/sfc/selftest.c6
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c13
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/silan/sc92031.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c130
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/cpts.c11
-rw-r--r--drivers/net/ethernet/tile/tilegx.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c11
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c16
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c9
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c15
-rw-r--r--drivers/net/ethernet/xscale/Kconfig1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c11
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h207
-rw-r--r--drivers/net/hyperv/netvsc.c93
-rw-r--r--drivers/net/hyperv/netvsc_drv.c337
-rw-r--r--drivers/net/hyperv/rndis_filter.c187
-rw-r--r--drivers/net/ieee802154/Kconfig34
-rw-r--r--drivers/net/ieee802154/at86rf230.c520
-rw-r--r--drivers/net/ieee802154/fakehard.c22
-rw-r--r--drivers/net/ieee802154/mrf24j40.c17
-rw-r--r--drivers/net/ifb.c8
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/macvlan.c13
-rw-r--r--drivers/net/nlmon.c18
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/at803x.c30
-rw-r--r--drivers/net/phy/bcm7xxx.c359
-rw-r--r--drivers/net/phy/broadcom.c52
-rw-r--r--drivers/net/phy/dp83640.c94
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/micrel.c49
-rw-r--r--drivers/net/phy/phy.c51
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/ppp/ppp_generic.c60
-rw-r--r--drivers/net/team/team.c28
-rw-r--r--drivers/net/team/team_mode_loadbalance.c4
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c17
-rw-r--r--drivers/net/usb/lg-vl600.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c1124
-rw-r--r--drivers/net/veth.c23
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c112
-rw-r--r--drivers/net/vxlan.c13
-rw-r--r--drivers/net/wimax/i2400m/netdev.c3
-rw-r--r--drivers/net/wireless/Kconfig11
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/airo.c17
-rw-r--r--drivers/net/wireless/ath/ath.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c36
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h81
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c269
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c205
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c850
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c536
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c132
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h34
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c85
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c235
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c203
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c180
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.h26
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h27
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c260
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c240
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c179
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c247
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c1495
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h248
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c267
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c72
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c64
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c241
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c181
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c234
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c177
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c334
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h164
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c136
-rw-r--r--drivers/net/wireless/atmel.c8
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/debugfs.h2
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/main.h35
-rw-r--r--drivers/net/wireless/b43/phy_common.c4
-rw-r--r--drivers/net/wireless/b43/pio.c10
-rw-r--r--drivers/net/wireless/b43/sysfs.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c14
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/sysfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c107
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1034
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c982
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c972
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h231
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c283
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c20
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_wifi.h3
-rw-r--r--drivers/net/wireless/cw1200/fwio.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c13
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c15
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c83
-rw-r--r--drivers/net/wireless/iwlegacy/common.h17
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c270
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c (renamed from drivers/net/wireless/iwlwifi/mvm/bt-coex.c)474
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c226
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c485
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h)21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h106
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c79
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c78
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c601
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h259
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c87
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/offloading.c215
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c481
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c426
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c100
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c210
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c255
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c213
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c43
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c138
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c85
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c52
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c410
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c73
-rw-r--r--drivers/net/wireless/libertas/cfg.c3
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c113
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h6
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c192
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h2
-rw-r--r--drivers/net/wireless/mwifiex/11h.c4
-rw-r--r--drivers/net/wireless/mwifiex/11n.c106
-rw-r--r--drivers/net/wireless/mwifiex/11n.h58
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c203
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/README2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c403
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c205
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c161
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c8
-rw-r--r--drivers/net/wireless/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h202
-rw-r--r--drivers/net/wireless/mwifiex/ie.c6
-rw-r--r--drivers/net/wireless/mwifiex/init.c8
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h25
-rw-r--r--drivers/net/wireless/mwifiex/join.c66
-rw-r--r--drivers/net/wireless/mwifiex/main.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.h112
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c180
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h5
-rw-r--r--drivers/net/wireless/mwifiex/scan.c615
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c11
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c471
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c130
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c52
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c164
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c34
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c1044
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c24
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c130
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c22
-rw-r--r--drivers/net/wireless/mwifiex/usb.c11
-rw-r--r--drivers/net/wireless/mwifiex/util.c118
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c131
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/mwl8k.c197
-rw-r--r--drivers/net/wireless/orinoco/cfg.c5
-rw-r--r--drivers/net/wireless/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/orinoco/scan.c5
-rw-r--r--drivers/net/wireless/orinoco/wext.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c7
-rw-r--r--drivers/net/wireless/rsi/Kconfig30
-rw-r--r--drivers/net/wireless/rsi/Makefile12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c342
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c339
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1008
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c295
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c1304
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_pkt.c196
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c850
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c566
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c575
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c177
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h126
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h87
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h48
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h218
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h285
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h129
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1009
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8180.h76
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c23
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c475
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h61
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c20
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h269
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig27
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile3
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/Makefile7
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h75
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c3698
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h173
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c1011
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h559
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c218
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h52
-rw-r--r--drivers/net/wireless/rtlwifi/core.c124
-rw-r--r--drivers/net/wireless/rtlwifi/core.h4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c131
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h14
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c119
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h60
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/fw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c128
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/phy.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c31
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c429
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c48
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c87
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c260
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c127
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c530
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/reg.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/Makefile19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/def.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c1325
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.h310
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.c620
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.h248
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c2523
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.h64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.c153
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/led.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.c2156
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.h217
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c106
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h304
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c140
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h95
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/reg.h2277
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.c504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c384
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.c572
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/table.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c960
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.h617
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c65
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c329
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h126
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/main.c33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c434
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h89
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c4
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h518
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c37
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c31
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c71
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h4
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c67
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c85
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c200
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h27
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h86
-rw-r--r--drivers/net/wireless/wl3501_cs.c6
-rw-r--r--drivers/net/wireless/zd1201.c9
-rw-r--r--drivers/net/xen-netback/common.h112
-rw-r--r--drivers/net/xen-netback/interface.c141
-rw-r--r--drivers/net/xen-netback/netback.c813
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/nfc/Kconfig12
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn533.c28
-rw-r--r--drivers/nfc/pn544/i2c.c194
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/pn544/pn544.h3
-rw-r--r--drivers/nfc/port100.c25
-rw-r--r--drivers/nfc/trf7970a.c1370
-rw-r--r--drivers/of/of_mdio.c23
-rw-r--r--drivers/of/of_net.c26
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/ptp/ptp_chardev.c128
-rw-r--r--drivers/ptp/ptp_clock.c23
-rw-r--r--drivers/ptp/ptp_ixp46x.c1
-rw-r--r--drivers/ptp/ptp_pch.c1
-rw-r--r--drivers/ptp/ptp_private.h8
-rw-r--r--drivers/ptp/ptp_sysfs.c115
-rw-r--r--drivers/s390/net/qeth_core.h7
-rw-r--r--drivers/s390/net/qeth_core_main.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c6
-rw-r--r--drivers/staging/rtl8821ae/rc.c1
-rw-r--r--drivers/staging/rtl8821ae/rtl8821ae/trx.c2
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c6
885 files changed, 79944 insertions, 22365 deletions
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 62a76076b548..f1a9198dfe5a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1925,7 +1925,7 @@ static int ucode_init(loader_block *lb, amb_dev *dev)
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
- const char *errmsg = 0;
+ const char *errmsg = NULL;
int res;
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b41c9481b67b..82f2ae0d7cc4 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -736,8 +736,8 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
skb = td->skb;
if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
+ wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
}
td->dev->ntxpckts--;
@@ -1123,7 +1123,7 @@ static void fs_close(struct atm_vcc *atm_vcc)
this sleep_on, we'll lose any reference to these packets. Memory leak!
On the other hand, it's awfully convenient that we can abort a "close" that
is taking too long. Maybe just use non-interruptible sleep on? -- REW */
- interruptible_sleep_on (& vcc->close_wait);
+ wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
}
txtp = &atm_vcc->qos.txtp;
@@ -2000,7 +2000,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
free_irq (dev->irq, dev);
- del_timer (&dev->timer);
+ del_timer_sync (&dev->timer);
atm_dev_deregister(dev->atm_dev);
free_queue (dev, &dev->hp_txq);
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 45d506363aba..909c95bd7be2 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -368,9 +368,9 @@ EXPORT_SYMBOL(idt77105_init);
static void __exit idt77105_exit(void)
{
- /* turn off timers */
- del_timer(&stats_timer);
- del_timer(&restart_timer);
+ /* turn off timers */
+ del_timer_sync(&stats_timer);
+ del_timer_sync(&restart_timer);
}
module_exit(idt77105_exit);
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 9587e959ce1a..9988ac98b6d8 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -639,9 +639,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->hbnr.init = NUM_HB;
card->hbnr.max = MAX_HB;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
card->sm_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
card->lg_addr = 0x00000000;
card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
@@ -979,7 +979,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->sm_addr;
handle2 = card->sm_handle;
card->sm_addr = 0x00000000;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
} else { /* (!sm_addr) */
card->sm_addr = addr1;
@@ -993,7 +993,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->lg_addr;
handle2 = card->lg_handle;
card->lg_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
} else { /* (!lg_addr) */
card->lg_addr = addr1;
@@ -1739,10 +1739,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
}
scq->full = 1;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
if (scq->full) {
spin_unlock_irqrestore(&scq->lock, flags);
@@ -1789,10 +1789,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
scq->full = 1;
if (has_run++)
break;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
}
if (!scq->full) {
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e3fb496c7163..943cf0d6abaf 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -760,7 +760,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void solos_bh(unsigned long card_arg)
+static void solos_bh(unsigned long card_arg)
{
struct solos_card *card = (void *)card_arg;
uint32_t card_flags;
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 25f9887a35d0..d7f81ad56b8a 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -218,7 +218,14 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
#if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
chip->to_irq = bcma_gpio_to_irq;
#endif
- chip->ngpio = 16;
+ switch (cc->core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ chip->ngpio = 32;
+ break;
+ default:
+ chip->ngpio = 16;
+ }
+
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
* a random base number. */
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 106d1d8e16ad..be571fef185d 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -62,50 +62,54 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x3000) },
/* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x0CF3, 0xE019) },
{ USB_DEVICE(0x13d3, 0x3304) },
- { USB_DEVICE(0x0930, 0x0215) },
- { USB_DEVICE(0x0489, 0xE03D) },
- { USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },
/* Atheros AR3012 with sflash firmware*/
- { USB_DEVICE(0x0CF3, 0x0036) },
- { USB_DEVICE(0x0CF3, 0x3004) },
- { USB_DEVICE(0x0CF3, 0x3008) },
- { USB_DEVICE(0x0CF3, 0x311D) },
- { USB_DEVICE(0x0CF3, 0x817a) },
- { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x0489, 0xe04d) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x0489, 0xe056) },
+ { USB_DEVICE(0x0489, 0xe05f) },
+ { USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
- { USB_DEVICE(0x13d3, 0x3362) },
- { USB_DEVICE(0x0CF3, 0xE004) },
- { USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x0220) },
- { USB_DEVICE(0x0489, 0xe057) },
- { USB_DEVICE(0x13d3, 0x3393) },
- { USB_DEVICE(0x0489, 0xe04e) },
- { USB_DEVICE(0x0489, 0xe056) },
- { USB_DEVICE(0x0489, 0xe04d) },
- { USB_DEVICE(0x04c5, 0x1330) },
- { USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x0b05, 0x17d0) },
+ { USB_DEVICE(0x0CF3, 0x0036) },
+ { USB_DEVICE(0x0CF3, 0x3004) },
+ { USB_DEVICE(0x0CF3, 0x3005) },
+ { USB_DEVICE(0x0CF3, 0x3008) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
+ { USB_DEVICE(0x0CF3, 0x311E) },
+ { USB_DEVICE(0x0CF3, 0x311F) },
{ USB_DEVICE(0x0cf3, 0x3121) },
+ { USB_DEVICE(0x0CF3, 0x817a) },
{ USB_DEVICE(0x0cf3, 0xe003) },
- { USB_DEVICE(0x0489, 0xe05f) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0CF3, 0xE005) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3402) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
/* Atheros AR5BBU22 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE03C) },
{ USB_DEVICE(0x0489, 0xE036) },
+ { USB_DEVICE(0x0489, 0xE03C) },
{ } /* Terminating entry */
};
@@ -118,36 +122,40 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
- { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
@@ -174,10 +182,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
}
memcpy(send_buf, firmware->data, 20);
- if ((err = usb_control_msg(udev, pipe,
- USB_REQ_DFU_DNLOAD,
- USB_TYPE_VENDOR, 0, 0,
- send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+ err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
+ 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
BT_ERR("Can't change to loading configuration err");
goto error;
}
@@ -360,7 +367,7 @@ static int ath3k_load_patch(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
- fw_version.rom_version);
+ le32_to_cpu(fw_version.rom_version));
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
@@ -422,7 +429,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
- fw_version.rom_version, clk_value, ".dfu");
+ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
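The two snprintf() fixes above convert rom_version with le32_to_cpu() before formatting it into the firmware filename. A small sketch of the underlying convention, with a hypothetical struct (fw_version_info) standing in for the driver's own version structure:

/* Illustrative sketch, not part of the patch: values read from the device
 * arrive in a fixed byte order, so they are declared with the sparse
 * annotation __le32 and converted with le32_to_cpu() before the CPU uses
 * them.  On little-endian hosts the conversion is a no-op; on big-endian
 * hosts it byteswaps.  struct fw_version_info is a hypothetical stand-in. */
#include <linux/types.h>
#include <linux/kernel.h>

struct fw_version_info {
	__le32 rom_version;	/* little-endian on the wire */
	__le32 build_version;
};

static u32 fw_rom_version(const struct fw_version_info *v)
{
	return le32_to_cpu(v->rom_version);
}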
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 31386998c9a7..b2e7e94a6771 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -131,8 +131,11 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);
- if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
- return -ENOMEM;
+ if (!urb) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+ }
pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
@@ -218,8 +221,11 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
BT_DBG("bfusb %p urb %p", data, urb);
- if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
- return -ENOMEM;
+ if (!urb) {
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+ }
skb = bt_skb_alloc(size, GFP_ATOMIC);
if (!skb) {
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 57427de864a6..dfa5043e68ba 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -257,7 +257,8 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
ready_bit = XMIT_BUF_ONE_READY;
}
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
if (bt_cb(skb)->pkt_type & 0x80) {
@@ -391,7 +392,8 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
@@ -566,7 +568,8 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
- if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate mem for new packet");
return -1;
}
@@ -898,7 +901,7 @@ static void bluecard_release(struct pcmcia_device *link)
bluecard_close(info);
- del_timer(&(info->timer));
+ del_timer_sync(&(info->timer));
pcmcia_disable_device(link);
}
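The bluecard change above also switches the release path from del_timer() to del_timer_sync(). A brief sketch of why that matters on SMP, using hypothetical names (my_info, my_release):

/* Illustrative sketch, not part of the patch: del_timer() only removes a
 * pending timer; a handler already running on another CPU keeps running and
 * may dereference memory the caller is about to free.  del_timer_sync() also
 * waits for a running handler to finish, so it is the right call on teardown
 * paths (provided no lock held here is also taken by the handler).  my_info
 * and my_release are hypothetical. */
#include <linux/timer.h>
#include <linux/slab.h>

struct my_info {
	struct timer_list timer;
	/* ... driver state the timer handler touches ... */
};

static void my_release(struct my_info *info)
{
	del_timer_sync(&info->timer);	/* handler can no longer be running */
	kfree(info);			/* now safe to free the state */
}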
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 73d87994d028..1d82721cf9c6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -193,8 +193,8 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
break;
-
- if (!(skb = skb_dequeue(&(info->txq)))) {
+ skb = skb_dequeue(&(info->txq));
+ if (!skb) {
clear_bit(XMIT_SENDING, &(info->tx_state));
break;
}
@@ -238,7 +238,8 @@ static void bt3c_receive(bt3c_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 1e0320af00c6..2c4997ce2484 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -59,12 +59,13 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
- }
- if (hci_opcode_ogf(opcode) == 0x3F) {
- BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
- kfree_skb(skb);
- return false;
+ if (hci_opcode_ogf(opcode) == 0x3F) {
+ BT_DBG("vendor event skipped: opcode=%#4.4x",
+ opcode);
+ kfree_skb(skb);
+ return false;
+ }
}
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index a03ecc22a561..fb948f02eda5 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -149,7 +149,8 @@ static void btuart_write_wakeup(btuart_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
/* Send frame */
@@ -190,7 +191,8 @@ static void btuart_receive(btuart_info_t *info)
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
return;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index baeaaed299e4..f338b0c5a8de 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,21 +101,24 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x04ca, 0x2003) },
{ USB_DEVICE(0x0b05, 0x17b5) },
{ USB_DEVICE(0x0b05, 0x17cb) },
- { USB_DEVICE(0x04ca, 0x2003) },
- { USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x413c, 0x8197) },
/* Foxconn - Hon Hai */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
- /*Broadcom devices with vendor specific id */
+ /* Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+ /* IMC Networks - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+
{ } /* Terminating entry */
};
@@ -129,55 +132,59 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
/* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
/* Atheros AR5BBU12 with sflash firmware */
- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* Broadcom BCM2035 */
- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
/* Broadcom BCM2045 */
{ USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
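The table churn above is mostly re-sorting entries by vendor/product id, but the blacklist entries also show the usual pattern of stashing per-device quirk flags in .driver_info. A sketch of how such flags are typically read back in probe(); MY_QUIRK and my_probe are hypothetical:

/* Illustrative sketch, not part of the patch: quirk flags placed in
 * .driver_info by the match table come back through the usb_device_id
 * pointer handed to probe().  MY_QUIRK and my_probe are hypothetical. */
#include <linux/usb.h>

#define MY_QUIRK	0x01

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	unsigned long quirks = id->driver_info;

	if (quirks & MY_QUIRK) {
		/* apply the device-specific workaround here */
	}
	return 0;
}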
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 52eed1f3565d..2bd8fad17206 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -153,7 +153,8 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
if (!pcmcia_dev_present(info->p_dev))
return;
- if (!(skb = skb_dequeue(&(info->txq))))
+ skb = skb_dequeue(&(info->txq));
+ if (!skb)
break;
/* Send frame */
@@ -215,13 +216,15 @@ static void dtl1_receive(dtl1_info_t *info)
info->hdev->stat.byte_rx++;
/* Allocate packet */
- if (info->rx_skb == NULL)
- if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+ if (info->rx_skb == NULL) {
+ info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!info->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
return;
}
+ }
*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
nsh = (nsh_t *)info->rx_skb->data;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 0bc87f7abd95..21cc45b34f13 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -291,7 +291,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
/* First of all, check for unreliable messages in the queue,
since they have priority */
- if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
+ skb = skb_dequeue(&bcsp->unrel);
+ if (skb != NULL) {
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
if (nskb) {
kfree_skb(skb);
@@ -308,16 +309,20 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
- if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
- struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
- if (nskb) {
- __skb_queue_tail(&bcsp->unack, skb);
- mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
- spin_unlock_irqrestore(&bcsp->unack.lock, flags);
- return nskb;
- } else {
- skb_queue_head(&bcsp->rel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
+ skb = skb_dequeue(&bcsp->rel);
+ if (skb != NULL) {
+ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
+ bt_cb(skb)->pkt_type);
+ if (nskb) {
+ __skb_queue_tail(&bcsp->unack, skb);
+ mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
+ spin_unlock_irqrestore(&bcsp->unack.lock, flags);
+ return nskb;
+ } else {
+ skb_queue_head(&bcsp->rel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
}
}
@@ -715,6 +720,9 @@ static int bcsp_open(struct hci_uart *hu)
static int bcsp_close(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
+
+ del_timer_sync(&bcsp->tbcsp);
+
hu->priv = NULL;
BT_DBG("hu %p", hu);
@@ -722,7 +730,6 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->unack);
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);
- del_timer(&bcsp->tbcsp);
kfree(bcsp);
return 0;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index f6f497450560..04680ead9275 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -206,12 +206,12 @@ static int h5_close(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
+ del_timer_sync(&h5->timer);
+
skb_queue_purge(&h5->unack);
skb_queue_purge(&h5->rel);
skb_queue_purge(&h5->unrel);
- del_timer(&h5->timer);
-
kfree(h5);
return 0;
@@ -673,7 +673,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
}
- if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+ skb = skb_dequeue(&h5->unrel);
+ if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
@@ -690,7 +691,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
if (h5->unack.qlen >= h5->tx_win)
goto unlock;
- if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+ skb = skb_dequeue(&h5->rel);
+ if (skb != NULL) {
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
skb->data, skb->len);
if (nskb) {
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 6e06f6f69152..f1fbf4f1e5be 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -271,7 +271,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
- if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
+ hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+ if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
@@ -569,7 +570,8 @@ static int __init hci_uart_init(void)
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
hci_uart_ldisc.owner = THIS_MODULE;
- if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) {
+ err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+ if (err) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
return err;
}
@@ -614,7 +616,8 @@ static void __exit hci_uart_exit(void)
#endif
/* Release tty registration of line discipline */
- if ((err = tty_unregister_ldisc(N_HCI)))
+ err = tty_unregister_ldisc(N_HCI);
+ if (err)
BT_ERR("Can't unregister HCI line discipline (%d)", err);
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 1ef6990a5c7e..add1c6a72063 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -359,7 +359,7 @@ static const struct file_operations vhci_fops = {
static struct miscdevice vhci_miscdev= {
.name = "vhci",
.fops = &vhci_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = VHCI_MINOR,
};
static int __init vhci_init(void)
@@ -385,3 +385,4 @@ MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
+MODULE_ALIAS_MISCDEV(VHCI_MINOR);
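The vhci change above moves from a dynamic to a fixed misc minor and adds MODULE_ALIAS_MISCDEV so the module can be loaded on demand when the static /dev node is opened. A generic sketch of that registration pattern; MY_MINOR, mydev and the fops stub are hypothetical:

/* Illustrative sketch, not part of the patch: a misc device on a fixed minor,
 * plus the aliases that let devtmpfs create the node up front and let the
 * module be autoloaded on first open.  MY_MINOR (assumed to be reserved in
 * miscdevice.h), mydev and my_fops are hypothetical. */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

#define MY_MINOR	137

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice my_miscdev = {
	.minor = MY_MINOR,		/* fixed, instead of MISC_DYNAMIC_MINOR */
	.name  = "mydev",
	.fops  = &my_fops,
};

static int __init my_init(void)
{
	return misc_register(&my_miscdev);
}

static void __exit my_exit(void)
{
	misc_deregister(&my_miscdev);
}

module_init(my_init);
module_exit(my_exit);

MODULE_ALIAS_MISCDEV(MY_MINOR);		/* matches "char-major-10-<minor>" requests */
MODULE_ALIAS("devname:mydev");		/* devtmpfs static node name */
MODULE_LICENSE("GPL");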
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 77afe7487d34..b14f1d36f897 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -145,7 +145,6 @@ static int cn_call_callback(struct sk_buff *skb)
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
- err = 0;
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d286bdebe2ab..7e98a58aacfd 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1647,6 +1647,15 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_ARP_MISS;
}
+/* Returns whether a CPL status conveys negative advice.
+ */
+static int is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
#define ACT_OPEN_RETRY_COUNT 2
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1835,7 +1844,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
- if (status == CPL_ERR_RTX_NEG_ADVICE) {
+ if (is_neg_adv(status)) {
printk(KERN_WARNING MOD "Connection problems for atid %u\n",
atid);
return 0;
@@ -2265,15 +2274,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -2287,7 +2287,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int tid = GET_TID(req);
ep = lookup_tid(t, tid);
- if (is_neg_adv_abort(req->status)) {
+ if (is_neg_adv(req->status)) {
PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
ep->hwtid);
return 0;
@@ -3570,7 +3570,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- if (is_neg_adv_abort(req->status)) {
+ if (is_neg_adv(req->status)) {
PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
ep->hwtid);
kfree_skb(skb);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4a033853312e..ba7335fd4ebf 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -64,6 +64,10 @@ struct uld_ctx {
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+#define DB_FC_RESUME_SIZE 64
+#define DB_FC_RESUME_DELAY 1
+#define DB_FC_DRAIN_THRESH 0
+
static struct dentry *c4iw_debugfs_root;
struct c4iw_debugfs_data {
@@ -282,7 +286,7 @@ static const struct file_operations stag_debugfs_fops = {
.llseek = default_llseek,
};
-static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
static int stats_show(struct seq_file *seq, void *v)
{
@@ -311,9 +315,10 @@ static int stats_show(struct seq_file *seq, void *v)
seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
- seq_printf(seq, " DB State: %s Transitions %llu\n",
+ seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
db_state_str[dev->db_state],
- dev->rdev.stats.db_state_transitions);
+ dev->rdev.stats.db_state_transitions,
+ dev->rdev.stats.db_fc_interruptions);
seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
dev->rdev.stats.act_ofld_conn_fails);
@@ -643,6 +648,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
goto err4;
}
+ rdev->status_page = (struct t4_dev_status_page *)
+ __get_free_page(GFP_KERNEL);
+ if (!rdev->status_page) {
+ pr_err(MOD "error allocating status page\n");
+ goto err4;
+ }
return 0;
err4:
c4iw_rqtpool_destroy(rdev);
@@ -656,6 +667,7 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
@@ -703,18 +715,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pr_info("%s: On-Chip Queues not supported on this device.\n",
pci_name(infop->pdev));
- if (!is_t4(infop->adapter_type)) {
- if (!allow_db_fc_on_t5) {
- db_fc_threshold = 100000;
- pr_info("DB Flow Control Disabled.\n");
- }
-
- if (!allow_db_coalescing_on_t5) {
- db_coalescing_threshold = -1;
- pr_info("DB Coalescing Disabled.\n");
- }
- }
-
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -749,6 +749,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
spin_lock_init(&devp->lock);
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
+ INIT_LIST_HEAD(&devp->db_fc_list);
if (c4iw_debugfs_root) {
devp->debugfs_root = debugfs_create_dir(
@@ -977,13 +978,16 @@ static int disable_qp_db(int id, void *p, void *data)
static void stop_queues(struct uld_ctx *ctx)
{
- spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->db_state == NORMAL) {
- ctx->dev->rdev.stats.db_state_transitions++;
- ctx->dev->db_state = FLOW_CONTROL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->dev->lock, flags);
+ ctx->dev->rdev.stats.db_state_transitions++;
+ ctx->dev->db_state = STOPPED;
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- }
- spin_unlock_irq(&ctx->dev->lock);
+ else
+ ctx->dev->rdev.status_page->db_off = 1;
+ spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
@@ -994,15 +998,70 @@ static int enable_qp_db(int id, void *p, void *data)
return 0;
}
+static void resume_rc_qp(struct c4iw_qp *qp)
+{
+ spin_lock(&qp->lock);
+ t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
+ qp->wq.sq.wq_pidx_inc = 0;
+ t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
+ qp->wq.rq.wq_pidx_inc = 0;
+ spin_unlock(&qp->lock);
+}
+
+static void resume_a_chunk(struct uld_ctx *ctx)
+{
+ int i;
+ struct c4iw_qp *qp;
+
+ for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
+ qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
+ db_fc_entry);
+ list_del_init(&qp->db_fc_entry);
+ resume_rc_qp(qp);
+ if (list_empty(&ctx->dev->db_fc_list))
+ break;
+ }
+}
+
static void resume_queues(struct uld_ctx *ctx)
{
spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->qpcnt <= db_fc_threshold &&
- ctx->dev->db_state == FLOW_CONTROL) {
- ctx->dev->db_state = NORMAL;
- ctx->dev->rdev.stats.db_state_transitions++;
- idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+ if (ctx->dev->db_state != STOPPED)
+ goto out;
+ ctx->dev->db_state = FLOW_CONTROL;
+ while (1) {
+ if (list_empty(&ctx->dev->db_fc_list)) {
+ WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
+ ctx->dev->db_state = NORMAL;
+ ctx->dev->rdev.stats.db_state_transitions++;
+ if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+ idr_for_each(&ctx->dev->qpidr, enable_qp_db,
+ NULL);
+ } else {
+ ctx->dev->rdev.status_page->db_off = 0;
+ }
+ break;
+ } else {
+ if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
+ < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
+ DB_FC_DRAIN_THRESH)) {
+ resume_a_chunk(ctx);
+ }
+ if (!list_empty(&ctx->dev->db_fc_list)) {
+ spin_unlock_irq(&ctx->dev->lock);
+ if (DB_FC_RESUME_DELAY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(DB_FC_RESUME_DELAY);
+ }
+ spin_lock_irq(&ctx->dev->lock);
+ if (ctx->dev->db_state != FLOW_CONTROL)
+ break;
+ }
+ }
}
+out:
+ if (ctx->dev->db_state != NORMAL)
+ ctx->dev->rdev.stats.db_fc_interruptions++;
spin_unlock_irq(&ctx->dev->lock);
}
@@ -1028,12 +1087,12 @@ static int count_qps(int id, void *p, void *data)
return 0;
}
-static void deref_qps(struct qp_list qp_list)
+static void deref_qps(struct qp_list *qp_list)
{
int idx;
- for (idx = 0; idx < qp_list.idx; idx++)
- c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+ for (idx = 0; idx < qp_list->idx; idx++)
+ c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
@@ -1044,17 +1103,22 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
for (idx = 0; idx < qp_list->idx; idx++) {
struct c4iw_qp *qp = qp_list->qps[idx];
+ spin_lock_irq(&qp->rhp->lock);
+ spin_lock(&qp->lock);
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.sq.qid,
t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq));
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(MOD "%s: Fatal error - "

"DB overflow recovery failed - "
"error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
return;
}
+ qp->wq.sq.wq_pidx_inc = 0;
ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
qp->wq.rq.qid,
@@ -1062,12 +1126,17 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq));
if (ret) {
- printk(KERN_ERR MOD "%s: Fatal error - "
+ pr_err(MOD "%s: Fatal error - "
"DB overflow recovery failed - "
"error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
return;
}
+ qp->wq.rq.wq_pidx_inc = 0;
+ spin_unlock(&qp->lock);
+ spin_unlock_irq(&qp->rhp->lock);
/* Wait for the dbfifo to drain */
while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1083,36 +1152,22 @@ static void recover_queues(struct uld_ctx *ctx)
struct qp_list qp_list;
int ret;
- /* lock out kernel db ringers */
- mutex_lock(&ctx->dev->db_mutex);
-
- /* put all queues in to recovery mode */
- spin_lock_irq(&ctx->dev->lock);
- ctx->dev->db_state = RECOVERY;
- ctx->dev->rdev.stats.db_state_transitions++;
- idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
- spin_unlock_irq(&ctx->dev->lock);
-
/* slow everybody down */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(1000));
- /* Wait for the dbfifo to completely drain. */
- while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(10));
- }
-
/* flush the SGE contexts */
ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
if (ret) {
printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
- goto out;
+ return;
}
/* Count active queues so we can build a list of queues to recover */
spin_lock_irq(&ctx->dev->lock);
+ WARN_ON(ctx->dev->db_state != STOPPED);
+ ctx->dev->db_state = RECOVERY;
idr_for_each(&ctx->dev->qpidr, count_qps, &count);
qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
@@ -1120,7 +1175,7 @@ static void recover_queues(struct uld_ctx *ctx)
printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev));
spin_unlock_irq(&ctx->dev->lock);
- goto out;
+ return;
}
qp_list.idx = 0;
@@ -1133,29 +1188,13 @@ static void recover_queues(struct uld_ctx *ctx)
recover_lost_dbs(ctx, &qp_list);
/* we're almost done! deref the qps and clean up */
- deref_qps(qp_list);
+ deref_qps(&qp_list);
kfree(qp_list.qps);
- /* Wait for the dbfifo to completely drain again */
- while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(10));
- }
-
- /* resume the queues */
spin_lock_irq(&ctx->dev->lock);
- if (ctx->dev->qpcnt > db_fc_threshold)
- ctx->dev->db_state = FLOW_CONTROL;
- else {
- ctx->dev->db_state = NORMAL;
- idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
- }
- ctx->dev->rdev.stats.db_state_transitions++;
+ WARN_ON(ctx->dev->db_state != RECOVERY);
+ ctx->dev->db_state = STOPPED;
spin_unlock_irq(&ctx->dev->lock);
-
-out:
- /* start up kernel db ringers again */
- mutex_unlock(&ctx->dev->db_mutex);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1165,9 +1204,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
switch (control) {
case CXGB4_CONTROL_DB_FULL:
stop_queues(ctx);
- mutex_lock(&ctx->dev->rdev.stats.lock);
ctx->dev->rdev.stats.db_full++;
- mutex_unlock(&ctx->dev->rdev.stats.lock);
break;
case CXGB4_CONTROL_DB_EMPTY:
resume_queues(ctx);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 23eaeabab93b..eb18f9be35e4 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
enum c4iw_rdev_flags {
T4_FATAL_ERROR = (1<<0),
+ T4_STATUS_PAGE_DISABLED = (1<<1),
};
struct c4iw_stat {
@@ -130,6 +131,7 @@ struct c4iw_stats {
u64 db_empty;
u64 db_drop;
u64 db_state_transitions;
+ u64 db_fc_interruptions;
u64 tcam_full;
u64 act_ofld_conn_fails;
u64 pas_ofld_conn_fails;
@@ -150,6 +152,7 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
+ struct t4_dev_status_page *status_page;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
- RECOVERY = 2
+ RECOVERY = 2,
+ STOPPED = 3
};
struct c4iw_dev {
@@ -225,10 +229,10 @@ struct c4iw_dev {
struct mutex db_mutex;
struct dentry *debugfs_root;
enum db_state db_state;
- int qpcnt;
struct idr hwtid_idr;
struct idr atid_idr;
struct idr stid_idr;
+ struct list_head db_fc_list;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -432,6 +436,7 @@ struct c4iw_qp_attributes {
struct c4iw_qp {
struct ib_qp ibqp;
+ struct list_head db_fc_entry;
struct c4iw_dev *rhp;
struct c4iw_ep *ep;
struct c4iw_qp_attributes attr;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7e94c9a656a1..79429256023a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -106,15 +106,56 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{
struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+ static int warned;
+ struct c4iw_alloc_ucontext_resp uresp;
+ int ret = 0;
+ struct c4iw_mm_entry *mm = NULL;
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return ERR_PTR(-ENOMEM);
+ if (!context) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
+
+ if (udata->outlen < sizeof(uresp)) {
+ if (!warned++)
+ pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+ rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
+ } else {
+ mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ uresp.status_page_size = PAGE_SIZE;
+
+ spin_lock(&context->mmap_lock);
+ uresp.status_page_key = context->key;
+ context->key += PAGE_SIZE;
+ spin_unlock(&context->mmap_lock);
+
+ ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (ret)
+ goto err_mm;
+
+ mm->key = uresp.status_page_key;
+ mm->addr = virt_to_phys(rhp->rdev.status_page);
+ mm->len = PAGE_SIZE;
+ insert_mmap(context, mm);
+ }
return &context->ibucontext;
+err_mm:
+ kfree(mm);
+err_free:
+ kfree(context);
+err:
+ return ERR_PTR(ret);
}
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
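c4iw_alloc_ucontext() now hands user space a key and size for the device status page. Below is a hedged sketch of how a user library could consume that response; this is not the libcxgb4 code, and everything other than the uresp field names and the db_off flag is hypothetical (including the assumption that the key is used as the mmap offset on the verbs command fd, mirroring how c4iw_mmap() matches keys):

/* Illustrative sketch, not part of the patch and not the libcxgb4 sources:
 * mapping the status page with the key/size from c4iw_alloc_ucontext_resp
 * and honouring db_off before ringing a user doorbell.  The key-as-mmap-
 * offset convention is an assumption here; all other names are
 * hypothetical. */
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

struct t4_dev_status_page {
	uint8_t db_off;
};

static volatile struct t4_dev_status_page *
map_status_page(int cmd_fd, uint64_t status_page_key, uint32_t status_page_size)
{
	void *p = mmap(NULL, status_page_size, PROT_READ, MAP_SHARED,
		       cmd_fd, (off_t)status_page_key);
	return p == MAP_FAILED ? NULL : p;
}

/* Called before a user-mode doorbell ring; if the kernel has set db_off,
 * the library is expected to fall back to a kernel-mediated ring. */
static int user_db_allowed(volatile const struct t4_dev_status_page *sp)
{
	return sp && !sp->db_off;
}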
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 582936708e6e..3b62eb556a47 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
wake_up(&(to_c4iw_qp(qp)->wait));
}
+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(entry))
+ list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL) {
+ t4_ring_sq_db(&qhp->wq, inc);
+ } else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.sq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL) {
+ t4_ring_rq_db(&qhp->wq, inc);
+ } else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.rq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -750,9 +790,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (t4_wq_db_enabled(&qhp->wq))
+ if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_sq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_sq_db(qhp, idx);
+ }
return err;
}
@@ -812,9 +856,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wr = wr->next;
num_wrs--;
}
- if (t4_wq_db_enabled(&qhp->wq))
+ if (!qhp->rhp->rdev.status_page->db_off) {
t4_ring_rq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_rq_db(qhp, idx);
+ }
return err;
}
@@ -1200,35 +1248,6 @@ out:
return ret;
}
-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition. This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
- int delay = db_delay_usecs;
-
- mutex_lock(&qhp->rhp->db_mutex);
- do {
-
- /*
- * The interrupt threshold is dbfifo_int_thresh << 6. So
- * make sure we don't cross that and generate an interrupt.
- */
- if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
- (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
- writel(QID(qid) | PIDX(inc), qhp->wq.db);
- break;
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(delay));
- delay = min(delay << 1, 2000);
- } while (1);
- mutex_unlock(&qhp->rhp->db_mutex);
- return 0;
-}
-
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
enum c4iw_qp_attr_mask mask,
struct c4iw_qp_attributes *attrs,
@@ -1278,11 +1297,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
if (mask & C4IW_QP_ATTR_SQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+ ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
goto out;
}
if (mask & C4IW_QP_ATTR_RQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+ ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
goto out;
}
@@ -1465,14 +1484,6 @@ out:
return ret;
}
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_enable_wq_db(&qp->wq);
- return 0;
-}
-
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
struct c4iw_dev *rhp;
@@ -1490,22 +1501,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- spin_lock_irq(&rhp->lock);
- remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- rhp->qpcnt--;
- BUG_ON(rhp->qpcnt < 0);
- if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = NORMAL;
- idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt <= db_coalescing_threshold)
- cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
- spin_unlock_irq(&rhp->lock);
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+ spin_lock_irq(&rhp->lock);
+ if (!list_empty(&qhp->db_fc_entry))
+ list_del_init(&qhp->db_fc_entry);
+ spin_unlock_irq(&rhp->lock);
+
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1520,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
return 0;
}
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_disable_wq_db(&qp->wq);
- return 0;
-}
-
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
@@ -1610,20 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- spin_lock_irq(&rhp->lock);
- if (rhp->db_state != NORMAL)
- t4_disable_wq_db(&qhp->wq);
- rhp->qpcnt++;
- if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = FLOW_CONTROL;
- idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt > db_coalescing_threshold)
- cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
- ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
- spin_unlock_irq(&rhp->lock);
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
goto err2;
@@ -1709,6 +1692,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
+ INIT_LIST_HEAD(&qhp->db_fc_entry);
PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e73ace739183..eeca8b1e6376 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -300,6 +300,7 @@ struct t4_sq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
u16 flags;
short flush_cidx;
};
@@ -324,6 +325,7 @@ struct t4_rq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
};
struct t4_wq {
@@ -609,3 +611,7 @@ static inline void t4_set_cq_in_error(struct t4_cq *cq)
((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}
#endif
+
+struct t4_dev_status_page {
+ u8 db_off;
+};
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index 32b754c35ab7..11ccd276e5d9 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -70,4 +70,9 @@ struct c4iw_create_qp_resp {
__u32 qid_mask;
__u32 flags;
};
+
+struct c4iw_alloc_ucontext_resp {
+ __u64 status_page_key;
+ __u32 status_page_size;
+};
#endif
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 2f215b93db6b..0eb141c41416 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
continue;
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
- if (slave_id >= dev->dev->num_slaves)
+ if (slave_id >= dev->dev->num_vfs + 1)
return;
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index d1f5f1dd77b0..56a593e0ae5d 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -61,6 +61,11 @@ struct cm_generic_msg {
__be32 remote_comm_id;
};
+struct cm_sidr_generic_msg {
+ struct ib_mad_hdr hdr;
+ __be32 request_id;
+};
+
struct cm_req_msg {
unsigned char unused[0x60];
union ib_gid primary_path_sgid;
@@ -69,28 +74,62 @@ struct cm_req_msg {
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
- msg->local_comm_id = cpu_to_be32(cm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ msg->request_id = cpu_to_be32(cm_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ pr_err("trying to set local_comm_id in SIDR_REP\n");
+ return;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ msg->local_comm_id = cpu_to_be32(cm_id);
+ }
}
static u32 get_local_comm_id(struct ib_mad *mad)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
- return be32_to_cpu(msg->local_comm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ return be32_to_cpu(msg->request_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ pr_err("trying to set local_comm_id in SIDR_REP\n");
+ return -1;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ return be32_to_cpu(msg->local_comm_id);
+ }
}
static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
- msg->remote_comm_id = cpu_to_be32(cm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ msg->request_id = cpu_to_be32(cm_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ pr_err("trying to set remote_comm_id in SIDR_REQ\n");
+ return;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ msg->remote_comm_id = cpu_to_be32(cm_id);
+ }
}
static u32 get_remote_comm_id(struct ib_mad *mad)
{
- struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
- return be32_to_cpu(msg->remote_comm_id);
+ if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+ struct cm_sidr_generic_msg *msg =
+ (struct cm_sidr_generic_msg *)mad;
+ return be32_to_cpu(msg->request_id);
+ } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ pr_err("trying to set remote_comm_id in SIDR_REQ\n");
+ return -1;
+ } else {
+ struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+ return be32_to_cpu(msg->remote_comm_id);
+ }
}
static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
@@ -282,19 +321,21 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
u32 sl_cm_id;
int pv_cm_id = -1;
- sl_cm_id = get_local_comm_id(mad);
-
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
+ mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ sl_cm_id = get_local_comm_id(mad);
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
__func__, slave_id, sl_cm_id);
return PTR_ERR(id);
}
- } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
+ } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
return 0;
} else {
+ sl_cm_id = get_local_comm_id(mad);
id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
}
@@ -315,14 +356,18 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
}
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
- struct ib_mad *mad)
+ struct ib_mad *mad)
{
u32 pv_cm_id;
struct id_map_entry *id;
- if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
+ if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
union ib_gid gid;
+ if (!slave)
+ return 0;
+
gid = gid_from_req_msg(ibdev, mad);
*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
if (*slave < 0) {
@@ -341,7 +386,8 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
return -ENOENT;
}
- *slave = id->slave_id;
+ if (slave)
+ *slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc40f08ca8f1..5f640814cc81 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -564,7 +564,7 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
}
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe)
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -574,12 +574,20 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
DMA_FROM_DEVICE);
hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
- wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
- wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
wc->dlid_path_bits = 0;
+ if (is_eth) {
+ wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
+ memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
+ memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ } else {
+ wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
+ wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
+ }
+
return 0;
}
@@ -594,6 +602,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_srq *msrq = NULL;
int is_send;
int is_error;
+ int is_eth;
u32 g_mlpath_rqpn;
u16 wqe_ctr;
unsigned tail = 0;
@@ -778,11 +787,15 @@ repoll:
break;
}
+ is_eth = (rdma_port_get_link_layer(wc->qp->device,
+ (*cur_qp)->port) ==
+ IB_LINK_LAYER_ETHERNET);
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
+ return use_tunnel_data(*cur_qp, cq, wc, tail,
+ cqe, is_eth);
}
wc->slid = be16_to_cpu(cqe->rlid);
@@ -793,20 +806,21 @@ repoll:
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
- if (rdma_port_get_link_layer(wc->qp->device,
- (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+ if (is_eth) {
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
- else
- wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
- if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
- wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
- MLX4_CQE_VID_MASK;
+ if (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK) {
+ wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+ MLX4_CQE_VID_MASK;
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
} else {
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->vlan_id = 0xffff;
}
- wc->wc_flags |= IB_WC_WITH_VLAN;
- memcpy(wc->smac, cqe->smac, ETH_ALEN);
- wc->wc_flags |= IB_WC_WITH_SMAC;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f2a3f48107e7..fd36ec672632 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -467,6 +467,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
int ret = 0;
u16 tun_pkey_ix;
u16 cached_pkey;
+ u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
if (dest_qpt > IB_QPT_GSI)
return -EINVAL;
@@ -509,6 +510,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
* The driver will set the force loopback bit in post_send */
memset(&attr, 0, sizeof attr);
attr.port_num = port;
+ if (is_eth) {
+ memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+ attr.ah_flags = IB_AH_GRH;
+ }
ah = ib_create_ah(tun_ctx->pd, &attr);
if (IS_ERR(ah))
return -ENOMEM;
@@ -540,11 +545,36 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
/* adjust tunnel data */
tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
- tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
- tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
+ if (is_eth) {
+ u16 vlan = 0;
+ if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
+ NULL)) {
+ /* VST mode */
+ if (vlan != wc->vlan_id)
+ /* Packet vlan is not the VST-assigned vlan.
+ * Drop the packet.
+ */
+ goto out;
+ else
+ /* Remove the vlan tag before forwarding
+ * the packet to the VF.
+ */
+ vlan = 0xffff;
+ } else {
+ vlan = wc->vlan_id;
+ }
+
+ tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
+ memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
+ memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
+ } else {
+ tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
+ tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+ }
+
ib_dma_sync_single_for_device(&dev->ib_dev,
tun_qp->tx_ring[tun_tx_ix].buf.map,
sizeof (struct mlx4_rcv_tunnel_mad),
@@ -580,6 +610,41 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
int err;
int slave;
u8 *slave_id;
+ int is_eth = 0;
+
+ if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+ is_eth = 0;
+ else
+ is_eth = 1;
+
+ if (is_eth) {
+ if (!(wc->wc_flags & IB_WC_GRH)) {
+ mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
+ return -EINVAL;
+ }
+ if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
+ mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
+ return -EINVAL;
+ }
+ if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+ mlx4_ib_warn(ibdev, "failed matching grh\n");
+ return -ENOENT;
+ }
+ if (slave >= dev->dev->caps.sqp_demux) {
+ mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
+ slave, dev->dev->caps.sqp_demux);
+ return -ENOENT;
+ }
+
+ if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
+ return 0;
+
+ err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
+ if (err)
+ pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ slave, err);
+ return 0;
+ }
/* Initially assume that this mad is for us */
slave = mlx4_master_func_num(dev->dev);
@@ -1076,8 +1141,9 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
- enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
- u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
+ enum ib_qp_type dest_qpt, u16 pkey_index,
+ u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
+ u8 *s_mac, struct ib_mad *mad)
{
struct ib_sge list;
struct ib_send_wr wr, *bad_wr;
@@ -1166,6 +1232,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.num_sge = 1;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
+ if (s_mac)
+ memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
+
ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
@@ -1174,6 +1243,22 @@ out:
return ret;
}
+static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
+{
+ if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+ return slave;
+ return mlx4_get_base_gid_ix(dev->dev, slave, port);
+}
+
+static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
+ struct ib_ah_attr *ah_attr)
+{
+ if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+ ah_attr->grh.sgid_index = slave;
+ else
+ ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+}
+
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
@@ -1184,6 +1269,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
struct ib_ah_attr ah_attr;
u8 *slave_id;
int slave;
+ int port;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1260,12 +1346,18 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
ah.ibah.device = ctx->ib_dev;
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
- if ((ah_attr.ah_flags & IB_AH_GRH) &&
- (ah_attr.grh.sgid_index != slave)) {
- mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
- slave, ah_attr.grh.sgid_index);
+ if (ah_attr.ah_flags & IB_AH_GRH)
+ fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
+
+ port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
+ if (port < 0)
return;
- }
+ ah_attr.port_num = port;
+ memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
+ ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
+ /* if slave have default vlan use it */
+ mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+ &ah_attr.vlan_id, &ah_attr.sl);
mlx4_ib_send_to_wire(dev, slave, ctx->port,
is_proxy_qp0(dev, wc->src_qp, slave) ?
@@ -1273,7 +1365,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
be16_to_cpu(tunnel->hdr.pkey_index),
be32_to_cpu(tunnel->hdr.remote_qpn),
be32_to_cpu(tunnel->hdr.qkey),
- &ah_attr, &tunnel->mad);
+ &ah_attr, wc->smac, &tunnel->mad);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1850,7 +1942,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
ctx->port = port;
ctx->ib_dev = &dev->ib_dev;
- for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+ for (i = 0;
+ i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+ i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev->dev, i);
+
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
+
ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
if (ret) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f9c12e92fdd6..6cb85467dde7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1546,7 +1546,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
iboe = &ibdev->iboe;
spin_lock(&iboe->lock);
- for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
(real_dev == iboe->masters[port - 1])) ||
(!netif_is_bond_master(real_dev) &&
@@ -1569,14 +1569,14 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
iboe = &ibdev->iboe;
- for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+ for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
(real_dev == iboe->masters[port - 1])) ||
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
break;
- if ((port == 0) || (port > MLX4_MAX_PORTS))
+ if ((port == 0) || (port > ibdev->dev->caps.num_ports))
return 0;
else
return port;
@@ -1626,7 +1626,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
union ib_gid gid;
- if ((port == 0) || (port > MLX4_MAX_PORTS))
+ if ((port == 0) || (port > ibdev->dev->caps.num_ports))
return;
/* IPv4 gids */
@@ -1888,14 +1888,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
pr_info_once("%s", mlx4_ib_version);
- mlx4_foreach_non_ib_transport_port(i, dev)
- num_ports++;
-
- if (mlx4_is_mfunc(dev) && num_ports) {
- dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n");
- return NULL;
- }
-
num_ports = 0;
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
@@ -2331,17 +2323,24 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
+ struct mlx4_active_ports actv_ports;
+ unsigned int ports;
+ unsigned int first_port;
if (!mlx4_is_master(dev))
return;
- dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+ dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
goto out;
}
- for (i = 0; i < dev->caps.num_ports; i++) {
+ for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
@@ -2353,9 +2352,9 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
}
}
/* initialize or tear down tunnel QPs for the slave */
- for (i = 0; i < dev->caps.num_ports; i++) {
+ for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
- dm[i]->port = i + 1;
+ dm[i]->port = first_port + i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
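The do_slave_init() hunk above, like the demux-context change earlier and the sysfs registration change further down, iterates only over the ports a slave (VF) actually owns instead of all MLX4_MAX_PORTS. A minimal sketch of that pattern; demo_walk_active_ports() is an illustrative name and the header paths are assumptions, not taken from the patch:

#include <linux/bitops.h>
#include <linux/mlx4/device.h>	/* assumed home of mlx4_get_active_ports() */

static void demo_walk_active_ports(struct mlx4_dev *mdev, int slave)
{
	struct mlx4_active_ports actv = mlx4_get_active_ports(mdev, slave);
	int port;

	/* The bitmap is zero-based; mlx4 port numbers start at 1. */
	for (port = 1; port <= mdev->caps.num_ports; ++port) {
		if (!test_bit(port - 1, actv.ports))
			continue;
		pr_debug("slave %d owns port %d\n", slave, port);
	}
}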
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 25b2cdff00f8..ed327e6c8fdc 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -215,8 +215,9 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
}
mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
spin_unlock(&dev->sm_lock);
- return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
- IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
+ return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+ ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+ &ah_attr, NULL, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index a230683af940..f589522fddfd 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -241,6 +241,22 @@ struct mlx4_ib_proxy_sqp_hdr {
struct mlx4_rcv_tunnel_hdr tun;
} __packed;
+struct mlx4_roce_smac_vlan_info {
+ u64 smac;
+ int smac_index;
+ int smac_port;
+ u64 candidate_smac;
+ int candidate_smac_index;
+ int candidate_smac_port;
+ u16 vid;
+ int vlan_index;
+ int vlan_port;
+ u16 candidate_vid;
+ int candidate_vlan_index;
+ int candidate_vlan_port;
+ int update_vid;
+};
+
struct mlx4_ib_qp {
struct ib_qp ibqp;
struct mlx4_qp mqp;
@@ -273,8 +289,9 @@ struct mlx4_ib_qp {
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
+ struct mlx4_roce_smac_vlan_info pri;
+ struct mlx4_roce_smac_vlan_info alt;
u64 reg_id;
-
};
struct mlx4_ib_srq {
@@ -720,9 +737,12 @@ void mlx4_ib_tunnels_update_work(struct work_struct *work);
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad);
+
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
- u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
+ u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
+ struct ib_mad *mad);
+
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index d8f4d1fe8494..aadf7f82e1f3 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -662,10 +662,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
} else {
qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
}
} else
qp = *caller_qp;
@@ -940,11 +944,32 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
{
struct mlx4_ib_cq *send_cq, *recv_cq;
- if (qp->state != IB_QPS_RESET)
+ if (qp->state != IB_QPS_RESET) {
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
pr_warn("modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
+ if (qp->pri.smac) {
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = 0;
+ }
+ if (qp->alt.smac) {
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = 0;
+ }
+ if (qp->pri.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+ qp->pri.vid = 0xFFFF;
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
+ if (qp->alt.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+ qp->alt.vid = 0xFFFF;
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
+ }
get_cqs(qp, &send_cq, &recv_cq);
@@ -1057,6 +1082,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
qp = kzalloc(sizeof *qp, GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
/* fall through */
case IB_QPT_UD:
{
@@ -1188,12 +1215,13 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
- u8 port)
+ struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
{
int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET;
int vidx;
int smac_index;
+ int err;
path->grh_mylmc = ah->src_path_bits & 0x7f;
@@ -1223,61 +1251,103 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
}
if (is_eth) {
- path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3);
-
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
- memcpy(path->dmac, ah->dmac, ETH_ALEN);
- path->ackto = MLX4_IB_LINK_TYPE_ETH;
- /* find the index into MAC table for IBoE */
- if (!is_zero_ether_addr((const u8 *)&smac)) {
- if (mlx4_find_cached_mac(dev->dev, port, smac,
- &smac_index))
- return -ENOENT;
- } else {
- smac_index = 0;
- }
-
- path->grh_mylmc &= 0x80 | smac_index;
+ path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+ ((port - 1) << 6) | ((ah->sl & 7) << 3);
path->feup |= MLX4_FEUP_FORCE_ETH_UP;
if (vlan_tag < 0x1000) {
- if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
- return -ENOENT;
-
- path->vlan_index = vidx;
- path->fl = 1 << 6;
+ if (smac_info->vid < 0x1000) {
+ /* both valid vlan ids */
+ if (smac_info->vid != vlan_tag) {
+ /* different VIDs. unreg old and reg new */
+ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+ if (err)
+ return err;
+ smac_info->candidate_vid = vlan_tag;
+ smac_info->candidate_vlan_index = vidx;
+ smac_info->candidate_vlan_port = port;
+ smac_info->update_vid = 1;
+ path->vlan_index = vidx;
+ } else {
+ path->vlan_index = smac_info->vlan_index;
+ }
+ } else {
+ /* no current vlan tag in qp */
+ err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+ if (err)
+ return err;
+ smac_info->candidate_vid = vlan_tag;
+ smac_info->candidate_vlan_index = vidx;
+ smac_info->candidate_vlan_port = port;
+ smac_info->update_vid = 1;
+ path->vlan_index = vidx;
+ }
path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
+ path->fl = 1 << 6;
+ } else {
+ /* have current vlan tag. unregister it at modify-qp success */
+ if (smac_info->vid < 0x1000) {
+ smac_info->candidate_vid = 0xFFFF;
+ smac_info->update_vid = 1;
+ }
}
- } else
+
+ /* get smac_index for RoCE use.
+ * If no smac was yet assigned, register one.
+ * If one was already assigned, but the new mac differs,
+ * unregister the old one and register the new one.
+ */
+ if (!smac_info->smac || smac_info->smac != smac) {
+ /* register candidate now, unreg if needed, after success */
+ smac_index = mlx4_register_mac(dev->dev, port, smac);
+ if (smac_index >= 0) {
+ smac_info->candidate_smac_index = smac_index;
+ smac_info->candidate_smac = smac;
+ smac_info->candidate_smac_port = port;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ smac_index = smac_info->smac_index;
+ }
+
+ memcpy(path->dmac, ah->dmac, 6);
+ path->ackto = MLX4_IB_LINK_TYPE_ETH;
+ /* put MAC table smac index for IBoE */
+ path->grh_mylmc = (u8) (smac_index) | 0x80;
+ } else {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+ }
return 0;
}
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_ib_qp *mqp,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->ah_attr,
mlx4_mac_to_u64((u8 *)qp->smac),
(qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
- path, port);
+ path, &mqp->pri, port);
}
static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
const struct ib_qp_attr *qp,
enum ib_qp_attr_mask qp_attr_mask,
+ struct mlx4_ib_qp *mqp,
struct mlx4_qp_path *path, u8 port)
{
return _mlx4_set_path(dev, &qp->alt_ah_attr,
mlx4_mac_to_u64((u8 *)qp->alt_smac),
(qp_attr_mask & IB_QP_ALT_VID) ?
qp->alt_vlan_id : 0xffff,
- path, port);
+ path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
@@ -1292,6 +1362,37 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
}
}
+static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
+ struct mlx4_qp_context *context)
+{
+ struct net_device *ndev;
+ u64 u64_mac;
+ int smac_index;
+
+
+ ndev = dev->iboe.netdevs[qp->port - 1];
+ if (ndev) {
+ smac = ndev->dev_addr;
+ u64_mac = mlx4_mac_to_u64(smac);
+ } else {
+ u64_mac = dev->dev->caps.def_mac[qp->port];
+ }
+
+ context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
+ if (!qp->pri.smac) {
+ smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
+ if (smac_index >= 0) {
+ qp->pri.candidate_smac_index = smac_index;
+ qp->pri.candidate_smac = u64_mac;
+ qp->pri.candidate_smac_port = qp->port;
+ context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
+ } else {
+ return -ENOENT;
+ }
+ }
+ return 0;
+}
+
static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -1403,7 +1504,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
+ if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
attr_mask & IB_QP_PORT ?
attr->port_num : qp->port))
goto out;
@@ -1426,7 +1527,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
dev->dev->caps.pkey_table_len[attr->alt_port_num])
goto out;
- if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+ if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
+ &context->alt_path,
attr->alt_port_num))
goto out;
@@ -1532,6 +1634,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->pri_path.fl = 0x80;
context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
}
+ if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
+ context->pri_path.feup = 1 << 7; /* don't fsm */
+ /* handle smac_index */
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
+ qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
+ err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+ if (err)
+ return -EINVAL;
+ }
+ }
}
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
@@ -1619,28 +1735,113 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
- ibqp->srq ? to_msrq(ibqp->srq): NULL);
- if (send_cq != recv_cq)
- mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+ if (new_state == IB_QPS_RESET) {
+ if (!ibqp->uobject) {
+ mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (send_cq != recv_cq)
+ mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->sq_next_wqe = 0;
+ if (qp->rq.wqe_cnt)
+ *qp->db.db = 0;
- qp->rq.head = 0;
- qp->rq.tail = 0;
- qp->sq.head = 0;
- qp->sq.tail = 0;
- qp->sq_next_wqe = 0;
- if (qp->rq.wqe_cnt)
- *qp->db.db = 0;
+ if (qp->flags & MLX4_IB_QP_NETIF)
+ mlx4_ib_steer_qp_reg(dev, qp, 0);
+ }
+ if (qp->pri.smac) {
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = 0;
+ }
+ if (qp->alt.smac) {
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = 0;
+ }
+ if (qp->pri.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+ qp->pri.vid = 0xFFFF;
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
- if (qp->flags & MLX4_IB_QP_NETIF)
- mlx4_ib_steer_qp_reg(dev, qp, 0);
+ if (qp->alt.vid < 0x1000) {
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+ qp->alt.vid = 0xFFFF;
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
}
-
out:
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
+ if (qp->pri.candidate_smac) {
+ if (err) {
+ mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
+ } else {
+ if (qp->pri.smac)
+ mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+ qp->pri.smac = qp->pri.candidate_smac;
+ qp->pri.smac_index = qp->pri.candidate_smac_index;
+ qp->pri.smac_port = qp->pri.candidate_smac_port;
+ }
+ qp->pri.candidate_smac = 0;
+ qp->pri.candidate_smac_index = 0;
+ qp->pri.candidate_smac_port = 0;
+ }
+ if (qp->alt.candidate_smac) {
+ if (err) {
+ mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
+ } else {
+ if (qp->alt.smac)
+ mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+ qp->alt.smac = qp->alt.candidate_smac;
+ qp->alt.smac_index = qp->alt.candidate_smac_index;
+ qp->alt.smac_port = qp->alt.candidate_smac_port;
+ }
+ qp->alt.candidate_smac = 0;
+ qp->alt.candidate_smac_index = 0;
+ qp->alt.candidate_smac_port = 0;
+ }
+
+ if (qp->pri.update_vid) {
+ if (err) {
+ if (qp->pri.candidate_vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
+ qp->pri.candidate_vid);
+ } else {
+ if (qp->pri.vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
+ qp->pri.vid);
+ qp->pri.vid = qp->pri.candidate_vid;
+ qp->pri.vlan_port = qp->pri.candidate_vlan_port;
+ qp->pri.vlan_index = qp->pri.candidate_vlan_index;
+ }
+ qp->pri.candidate_vid = 0xFFFF;
+ qp->pri.update_vid = 0;
+ }
+
+ if (qp->alt.update_vid) {
+ if (err) {
+ if (qp->alt.candidate_vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
+ qp->alt.candidate_vid);
+ } else {
+ if (qp->alt.vid < 0x1000)
+ mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
+ qp->alt.vid);
+ qp->alt.vid = qp->alt.candidate_vid;
+ qp->alt.vlan_port = qp->alt.candidate_vlan_port;
+ qp->alt.vlan_index = qp->alt.candidate_vlan_index;
+ }
+ qp->alt.candidate_vid = 0xFFFF;
+ qp->alt.update_vid = 0;
+ }
+
return err;
}
@@ -1842,9 +2043,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
{
struct ib_device *ib_dev = sqp->qp.ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
+ struct mlx4_wqe_ctrl_seg *ctrl = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
- struct net_device *ndev;
union ib_gid sgid;
u16 pkey;
int send_size;
@@ -1868,12 +2069,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
/* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so
* we must use our own cache */
- sgid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sgid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
+ err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index, &sgid.raw[0]);
+ if (err)
+ return err;
} else {
err = ib_get_cached_gid(ib_dev,
be32_to_cpu(ah->av.ib.port_pd) >> 24,
@@ -1902,6 +2102,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+ if (is_eth)
+ memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
+ else {
if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
/* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so
@@ -1917,6 +2120,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
be32_to_cpu(ah->av.ib.port_pd) >> 24,
ah->av.ib.gid_index,
&sqp->ud_header.grh.source_gid);
+ }
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
}
@@ -1949,16 +2153,23 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
if (is_eth) {
u8 *smac;
+ struct in6_addr in6;
+
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
mlx->sched_prio = cpu_to_be16(pcp);
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
/* FIXME: cache smac value? */
- ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
- if (!ndev)
- return -ENODEV;
- smac = ndev->dev_addr;
+ memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
+ memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
+ memcpy(&in6, sgid.raw, sizeof(in6));
+
+ if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
+ smac = to_mdev(sqp->qp.ibqp.device)->
+ iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ else /* use the src mac of the tunnel */
+ smac = ah->av.eth.s_mac;
memcpy(sqp->ud_header.eth.smac_h, smac, 6);
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2190,6 +2401,8 @@ static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_
hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+ memcpy(hdr.mac, ah->av.eth.mac, 6);
+ hdr.vlan = ah->av.eth.vlan;
spc = MLX4_INLINE_ALIGN -
((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
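The qp.c hunks above register RoCE source MACs and VLANs as "candidates" before the modify-QP firmware command and only promote or roll them back once the command's outcome is known, so a failed transition releases nothing it did not reserve. A condensed sketch of that flow for the primary-path MAC, assuming the mlx4_ib headers are in scope; demo_update_smac() and demo_fw_modify_qp() are illustrative stand-ins, not functions from the patch:

/* Stand-in for the real modify-QP firmware command issued by the driver. */
static int demo_fw_modify_qp(struct mlx4_ib_qp *qp, int smac_index);

static int demo_update_smac(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			    u8 port, u64 new_smac)
{
	int idx, err;

	/* Reserve the new MAC first; a failed command then changes nothing. */
	idx = mlx4_register_mac(dev->dev, port, new_smac);
	if (idx < 0)
		return idx;

	err = demo_fw_modify_qp(qp, idx);
	if (err) {
		/* Roll back: drop the candidate registration. */
		mlx4_unregister_mac(dev->dev, port, new_smac);
		return err;
	}

	/* Commit: release the old registration and adopt the candidate. */
	if (qp->pri.smac)
		mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
	qp->pri.smac = new_smac;
	qp->pri.smac_index = idx;
	qp->pri.smac_port = port;
	return 0;
}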
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index db2ea31df832..5a38e43eca65 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -627,6 +627,7 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
int port;
struct kobject *p, *t;
struct mlx4_port *mport;
+ struct mlx4_active_ports actv_ports;
get_name(dev, name, slave, sizeof name);
@@ -649,7 +650,11 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
goto err_ports;
}
+ actv_ports = mlx4_get_active_ports(dev->dev, slave);
+
for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
err = add_port(dev, port, slave);
if (err)
goto err_add;
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index b4147c0b14b7..c3a1b061838d 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -796,7 +796,7 @@ static void __exit act2000_exit(void)
act2000_card *last;
while (card) {
unregister_card(card);
- del_timer(&card->ptimer);
+ del_timer_sync(&card->ptimer);
card = card->next;
}
card = cards;
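This hunk, like the sc/init and i4l ones further down, switches teardown paths from del_timer() to del_timer_sync(). A minimal sketch of the difference, with demo_timer as an illustrative driver-private timer:

#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_teardown(void)
{
	/* del_timer() only deactivates a pending timer; its callback may
	 * still be running on another CPU. del_timer_sync() additionally
	 * waits for a running callback to finish, so data the callback
	 * touches can safely be freed afterwards.
	 */
	del_timer_sync(&demo_timer);
}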
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index fb4f1bac0133..1c5dc345e7c5 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -86,12 +86,13 @@ isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off)
struct divert_info *inf;
int len;
- if (!*((struct divert_info **) file->private_data)) {
+ if (!(inf = *((struct divert_info **) file->private_data))) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&(rd_queue));
+ wait_event_interruptible(rd_queue, (inf =
+ *((struct divert_info **) file->private_data)));
}
- if (!(inf = *((struct divert_info **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
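Here and in the hisax, hysdn and i4l hunks below, the racy interruptible_sleep_on() is replaced by wait_event_interruptible(), which re-checks its condition after queueing the task, so a wakeup that fires just before the sleep is not lost. A minimal sketch of the pattern with illustrative demo_* names:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

static int demo_wait_for_data(void)
{
	/* Sleeps until demo_ready is set or a signal arrives;
	 * returns -ERESTARTSYS in the signal case.
	 */
	if (wait_event_interruptible(demo_wq, demo_ready))
		return -ERESTARTSYS;
	return 0;
}

static void demo_post_data(void)
{
	demo_ready = 1;				/* set the condition first ... */
	wake_up_interruptible(&demo_wq);	/* ... then wake the sleeper */
}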
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 2be1c8a3bb5f..d8ef64da26f1 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -509,7 +509,8 @@ static void
set_arcofi(struct IsdnCardState *cs, int bc) {
cs->dc.isac.arcofi_bc = bc;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
}
static int
@@ -528,7 +529,8 @@ check_arcofi(struct IsdnCardState *cs)
}
cs->dc.isac.arcofi_bc = 0;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
p = cs->dc.isac.mon_rx;
@@ -595,7 +597,8 @@ check_arcofi(struct IsdnCardState *cs)
Elsa_Types[cs->subtyp],
cs->hw.elsa.base + 8);
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
return (1);
}
return (0);
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index 3f84dd8f1757..a2a358c1dc8e 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -573,7 +573,8 @@ modem_l2l1(struct PStack *st, int pr, void *arg)
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(bcs->cs->dc.isac.arcofi_wait,
+ bcs->cs->dc.isac.arcofi_state == ARCOFI_NOP);
bcs->cs->hw.elsa.MFlag = 1;
} else {
printk(KERN_WARNING "ElsaSer: unknown pr %x\n", pr);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index b61e8d5e84ad..7b5fd8fb1761 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -175,14 +175,15 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
int len;
hysdn_card *card = PDE_DATA(file_inode(file));
- if (!*((struct log_data **) file->private_data)) {
+ if (!(inf = *((struct log_data **) file->private_data))) {
struct procdata *pd = card->proclog;
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
- interruptible_sleep_on(&(pd->rd_queue));
+ wait_event_interruptible(pd->rd_queue, (inf =
+ *((struct log_data **) file->private_data)));
}
- if (!(inf = *((struct log_data **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9bb12ba3191f..9b856e1890d1 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -777,7 +777,8 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
return 0;
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) {
if (sleep)
- interruptible_sleep_on(sleep);
+ wait_event_interruptible(*sleep,
+ !skb_queue_empty(&dev->drv[di]->rpqueue[channel]));
else
return 0;
}
@@ -1072,7 +1073,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->info_waitq));
+ wait_event_interruptible(dev->info_waitq,
+ file->private_data);
}
p = isdn_statstr();
file->private_data = NULL;
@@ -1128,7 +1130,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq));
+ wait_event_interruptible(dev->drv[drvidx]->st_waitq,
+ dev->drv[drvidx]->stavail);
}
if (dev->drv[drvidx]->interface->readstat) {
if (count > dev->drv[drvidx]->stavail)
@@ -1188,8 +1191,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
goto out;
}
chidx = isdn_minor2chan(minor);
- while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
- interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
+ wait_event_interruptible(dev->drv[drvidx]->snd_waitq[chidx],
+ (retval = isdn_writebuf_stub(drvidx, chidx, buf, count)));
goto out;
}
if (minor <= ISDN_MINOR_CTRLMAX) {
@@ -2378,7 +2381,7 @@ static void __exit isdn_exit(void)
}
isdn_tty_exit();
unregister_chrdev(ISDN_MAJOR, "isdn");
- del_timer(&dev->timer);
+ del_timer_sync(&dev->timer);
/* call vfree with interrupts enabled, else it will hang */
vfree(dev);
printk(KERN_NOTICE "ISDN-subsystem unloaded\n");
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 38ceac5053a0..a5da511e3c9a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -378,10 +378,15 @@ isdn_ppp_release(int min, struct file *file)
is->slcomp = NULL;
#endif
#ifdef CONFIG_IPPP_FILTER
- kfree(is->pass_filter);
- is->pass_filter = NULL;
- kfree(is->active_filter);
- is->active_filter = NULL;
+ if (is->pass_filter) {
+ sk_unattached_filter_destroy(is->pass_filter);
+ is->pass_filter = NULL;
+ }
+
+ if (is->active_filter) {
+ sk_unattached_filter_destroy(is->active_filter);
+ is->active_filter = NULL;
+ }
#endif
/* TODO: if this was the previous master: link the stuff to the new master */
@@ -629,25 +634,41 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_IPPP_FILTER
case PPPIOCSPASS:
{
+ struct sock_fprog fprog;
struct sock_filter *code;
- int len = get_filter(argp, &code);
+ int err, len = get_filter(argp, &code);
+
if (len < 0)
return len;
- kfree(is->pass_filter);
- is->pass_filter = code;
- is->pass_len = len;
- break;
+
+ fprog.len = len;
+ fprog.filter = code;
+
+ if (is->pass_filter)
+ sk_unattached_filter_destroy(is->pass_filter);
+ err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+ kfree(code);
+
+ return err;
}
case PPPIOCSACTIVE:
{
+ struct sock_fprog fprog;
struct sock_filter *code;
- int len = get_filter(argp, &code);
+ int err, len = get_filter(argp, &code);
+
if (len < 0)
return len;
- kfree(is->active_filter);
- is->active_filter = code;
- is->active_len = len;
- break;
+
+ fprog.len = len;
+ fprog.filter = code;
+
+ if (is->active_filter)
+ sk_unattached_filter_destroy(is->active_filter);
+ err = sk_unattached_filter_create(&is->active_filter, &fprog);
+ kfree(code);
+
+ return err;
}
#endif /* CONFIG_IPPP_FILTER */
default:
@@ -1147,14 +1168,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
}
if (is->pass_filter
- && sk_run_filter(skb, is->pass_filter) == 0) {
+ && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
kfree_skb(skb);
return;
}
if (!(is->active_filter
- && sk_run_filter(skb, is->active_filter) == 0)) {
+ && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0;
@@ -1293,14 +1314,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (ipt->pass_filter
- && sk_run_filter(skb, ipt->pass_filter) == 0) {
+ && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
kfree_skb(skb);
goto unlock;
}
if (!(ipt->active_filter
- && sk_run_filter(skb, ipt->active_filter) == 0)) {
+ && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
lp->huptimer = 0;
@@ -1490,9 +1511,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
}
drop |= is->pass_filter
- && sk_run_filter(skb, is->pass_filter) == 0;
+ && SK_RUN_FILTER(is->pass_filter, skb) == 0;
drop |= is->active_filter
- && sk_run_filter(skb, is->active_filter) == 0;
+ && SK_RUN_FILTER(is->active_filter, skb) == 0;
skb_push(skb, IPPP_MAX_HEADER - 4);
return drop;
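The ippp hunks above move the pass/active filters from raw sock_filter arrays run with sk_run_filter() to unattached filters created with sk_unattached_filter_create() and evaluated through SK_RUN_FILTER(). A condensed sketch of that lifecycle, using the same calls as the patch with illustrative demo_* names:

#include <linux/filter.h>
#include <linux/skbuff.h>

static struct sk_filter *demo_filter;

/* Install (or replace) a classic BPF program. The created filter keeps its
 * own copy, so the caller still kfree()s the sock_filter array, as the
 * patch does.
 */
static int demo_set_filter(struct sock_filter *code, unsigned short len)
{
	struct sock_fprog fprog = {
		.len	= len,
		.filter	= code,
	};

	if (demo_filter) {
		sk_unattached_filter_destroy(demo_filter);
		demo_filter = NULL;
	}
	return sk_unattached_filter_create(&demo_filter, &fprog);
}

/* SK_RUN_FILTER() returning 0 means "drop", mirroring the checks above. */
static bool demo_frame_passes(struct sk_buff *skb)
{
	return !demo_filter || SK_RUN_FILTER(demo_filter, skb) != 0;
}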
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 1eaf62273903..f02cc506fbfa 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -796,6 +796,7 @@ static void set_running_timeout(unsigned long ptr)
#endif
dev = (struct pcbit_dev *) ptr;
+ dev->l2_state = L2_DOWN;
wake_up_interruptible(&dev->set_running_wq);
}
@@ -818,7 +819,8 @@ static int set_protocol_running(struct pcbit_dev *dev)
add_timer(&dev->set_running_timer);
- interruptible_sleep_on(&dev->set_running_wq);
+ wait_event(dev->set_running_wq, dev->l2_state == L2_RUNNING ||
+ dev->l2_state == L2_DOWN);
del_timer(&dev->set_running_timer);
@@ -842,8 +844,6 @@ static int set_protocol_running(struct pcbit_dev *dev)
printk(KERN_DEBUG "pcbit: initialization failed\n");
printk(KERN_DEBUG "pcbit: firmware not loaded\n");
- dev->l2_state = L2_DOWN;
-
#ifdef DEBUG
printk(KERN_DEBUG "Bank3 = %02x\n",
readb(dev->sh_mem + BANK3));
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 92acc81f844d..d6f19b168e8a 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -390,8 +390,8 @@ static void __exit sc_exit(void)
/*
* kill the timers
*/
- del_timer(&(sc_adapter[i]->reset_timer));
- del_timer(&(sc_adapter[i]->stat_timer));
+ del_timer_sync(&(sc_adapter[i]->reset_timer));
+ del_timer_sync(&(sc_adapter[i]->stat_timer));
/*
* Tell I4L we're toast
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 494b888a6568..89402c3b64f8 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -177,11 +177,6 @@ config NETCONSOLE_DYNAMIC
config NETPOLL
def_bool NETCONSOLE
-config NETPOLL_TRAP
- bool "Netpoll traffic trapping"
- default n
- depends on NETPOLL
-
config NET_POLL_CONTROLLER
def_bool NETPOLL
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index dcde56057fe1..b667a51ed215 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
- memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback lacpdus in receive.
*/
- memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
- memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback MARKERs in receive.
*/
- memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
- pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->bond->dev->name,
port->slave->dev->name);
return;
@@ -1283,11 +1284,11 @@ static void ad_port_selection_logic(struct port *port)
/* meaning: the port was related to an aggregator
* but was not on the aggregator port list
*/
- pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->bond->dev->name,
- port->actor_port_number,
- port->slave->dev->name,
- port->aggregator->aggregator_identifier);
+ pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+ port->slave->bond->dev->name,
+ port->actor_port_number,
+ port->slave->dev->name,
+ port->aggregator->aggregator_identifier);
}
}
/* search on all aggregators for a suitable aggregator for this port */
@@ -1444,9 +1445,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warn("%s: Impossible agg select mode %d\n",
- curr->slave->bond->dev->name,
- __get_agg_selection_mode(curr->lag_ports));
+ pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
+ curr->slave->bond->dev->name,
+ __get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1559,9 +1560,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
/* check if any partner replys */
if (best->is_individual) {
- pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ?
- best->slave->bond->dev->name : "NULL");
+ pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave ?
+ best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1948,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
@@ -2080,8 +2081,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
/* select the active aggregator for the bond */
if (port) {
if (!port->slave) {
- pr_warn("%s: Warning: bond's first port is uninitialized\n",
- bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2095,8 +2096,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warn("%s: Warning: Found an uninitialized port\n",
- bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2157,8 +2158,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warn("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->bond->dev->name);
+ pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2310,9 +2311,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- pr_debug("Port %d changed link status to %s",
- port->actor_port_number,
- (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ pr_debug("Port %d changed link status to %s\n",
+ port->actor_port_number,
+ link == BOND_LINK_UP ? "UP" : "DOWN");
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
@@ -2390,17 +2391,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
}
}
- if (aggregator) {
- ad_info->aggregator_id = aggregator->aggregator_identifier;
- ad_info->ports = aggregator->num_of_ports;
- ad_info->actor_key = aggregator->actor_oper_aggregator_key;
- ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system,
- aggregator->partner_system.mac_addr_value, ETH_ALEN);
- return 0;
- }
+ if (!aggregator)
+ return -1;
- return -1;
+ ad_info->aggregator_id = aggregator->aggregator_identifier;
+ ad_info->ports = aggregator->num_of_ports;
+ ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+ ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+ ether_addr_copy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value);
+ return 0;
}
/* Wrapper used to hold bond->lock so no slave manipulation can occur */
@@ -2479,7 +2479,7 @@ out:
return NETDEV_TX_OK;
err_free:
/* no suitable interface, frame not sent */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto out;
}
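The bonding hunks above and below replace open-coded memcpy(..., ETH_ALEN) with ether_addr_copy(), which may be implemented with 16-bit copies and therefore expects both MAC buffers to be at least 2-byte aligned (true for addresses inside struct ethhdr and for netdev dev_addr). A one-function sketch with an illustrative name:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

/* Equivalent to memcpy(hdr->h_source, addr, ETH_ALEN), but lets the
 * compiler use aligned halfword copies.
 */
static void demo_fill_source(struct ethhdr *hdr, const u8 *addr)
{
	ether_addr_copy(hdr->h_source, addr);
}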
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f4dd9592ac62..bb03b1df2f3e 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -28,7 +28,7 @@
#include <linux/netdevice.h>
#include <linux/if_ether.h>
-// General definitions
+/* General definitions */
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
@@ -47,54 +47,54 @@ enum {
BOND_AD_COUNT = 2,
};
-// rx machine states(43.4.11 in the 802.3ad standard)
+/* rx machine states(43.4.11 in the 802.3ad standard) */
typedef enum {
AD_RX_DUMMY,
- AD_RX_INITIALIZE, // rx Machine
- AD_RX_PORT_DISABLED, // rx Machine
- AD_RX_LACP_DISABLED, // rx Machine
- AD_RX_EXPIRED, // rx Machine
- AD_RX_DEFAULTED, // rx Machine
- AD_RX_CURRENT // rx Machine
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
} rx_states_t;
-// periodic machine states(43.4.12 in the 802.3ad standard)
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
typedef enum {
AD_PERIODIC_DUMMY,
- AD_NO_PERIODIC, // periodic machine
- AD_FAST_PERIODIC, // periodic machine
- AD_SLOW_PERIODIC, // periodic machine
- AD_PERIODIC_TX // periodic machine
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
} periodic_states_t;
-// mux machine states(43.4.13 in the 802.3ad standard)
+/* mux machine states(43.4.13 in the 802.3ad standard) */
typedef enum {
AD_MUX_DUMMY,
- AD_MUX_DETACHED, // mux machine
- AD_MUX_WAITING, // mux machine
- AD_MUX_ATTACHED, // mux machine
- AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
-// tx machine states(43.4.15 in the 802.3ad standard)
+/* tx machine states(43.4.15 in the 802.3ad standard) */
typedef enum {
AD_TX_DUMMY,
- AD_TRANSMIT // tx Machine
+ AD_TRANSMIT /* tx Machine */
} tx_states_t;
-// rx indication types
+/* rx indication types */
typedef enum {
- AD_TYPE_LACPDU = 1, // type lacpdu
- AD_TYPE_MARKER // type marker
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
} pdu_type_t;
-// rx marker indication types
+/* rx marker indication types */
typedef enum {
- AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype
- AD_MARKER_RESPONSE_SUBTYPE // marker response subtype
+ AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
} bond_marker_subtype_t;
-// timers types(43.4.9 in the 802.3ad standard)
+/* timers types(43.4.9 in the 802.3ad standard) */
typedef enum {
AD_CURRENT_WHILE_TIMER,
AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
#pragma pack(1)
-// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
typedef struct lacpdu {
- u8 subtype; // = LACP(= 0x01)
+ u8 subtype; /* = LACP(= 0x01) */
u8 version_number;
- u8 tlv_type_actor_info; // = actor information(type/length/value)
- u8 actor_information_length; // = 20
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
__be16 actor_system_priority;
struct mac_addr actor_system;
__be16 actor_key;
__be16 actor_port_priority;
__be16 actor_port;
u8 actor_state;
- u8 reserved_3_1[3]; // = 0
- u8 tlv_type_partner_info; // = partner information
- u8 partner_information_length; // = 20
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
__be16 partner_system_priority;
struct mac_addr partner_system;
__be16 partner_key;
__be16 partner_port_priority;
__be16 partner_port;
u8 partner_state;
- u8 reserved_3_2[3]; // = 0
- u8 tlv_type_collector_info; // = collector information
- u8 collector_information_length; // = 16
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
__be16 collector_max_delay;
u8 reserved_12[12];
- u8 tlv_type_terminator; // = terminator
- u8 terminator_length; // = 0
- u8 reserved_50[50]; // = 0
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
} __packed lacpdu_t;
typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
struct lacpdu lacpdu;
} __packed lacpdu_header_t;
-// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
typedef struct bond_marker {
- u8 subtype; // = 0x02 (marker PDU)
- u8 version_number; // = 0x01
- u8 tlv_type; // = 0x01 (marker information)
- // = 0x02 (marker response information)
- u8 marker_length; // = 0x16
- u16 requester_port; // The number assigned to the port by the requester
- struct mac_addr requester_system; // The requester's system id
- u32 requester_transaction_id; // The transaction id allocated by the requester,
- u16 pad; // = 0
- u8 tlv_type_terminator; // = 0x00
- u8 terminator_length; // = 0x00
- u8 reserved_90[90]; // = 0
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+ u32 requester_transaction_id; /* The transaction id allocated by the requester, */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
} __packed bond_marker_t;
typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
#pragma pack(8)
#endif
-// aggregator structure(43.4.5 in the 802.3ad standard)
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
struct mac_addr partner_system;
u16 partner_system_priority;
u16 partner_oper_aggregator_key;
- u16 receive_state; // BOOLEAN
- u16 transmit_state; // BOOLEAN
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
struct port *lag_ports;
- // ****** PRIVATE PARAMETERS ******
- struct slave *slave; // pointer to the bond slave that this aggregator belongs to
- u16 is_active; // BOOLEAN. Indicates if this aggregator is active
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
u16 num_of_ports;
} aggregator_t;
@@ -201,12 +201,12 @@ struct port_params {
u16 port_state;
};
-// port structure(43.4.6 in the 802.3ad standard)
+/* port structure(43.4.6 in the 802.3ad standard) */
typedef struct port {
u16 actor_port_number;
u16 actor_port_priority;
- struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
- u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
u16 actor_port_aggregator_identifier;
bool ntt;
u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
bool is_enabled;
- // ****** PRIVATE PARAMETERS ******
- u16 sm_vars; // all state machines variables for this port
- rx_states_t sm_rx_state; // state machine rx state
- u16 sm_rx_timer_counter; // state machine rx timer counter
- periodic_states_t sm_periodic_state;// state machine periodic state
- u16 sm_periodic_timer_counter; // state machine periodic timer counter
- mux_states_t sm_mux_state; // state machine mux state
- u16 sm_mux_timer_counter; // state machine mux timer counter
- tx_states_t sm_tx_state; // state machine tx state
- u16 sm_tx_timer_counter; // state machine tx timer counter(allways on - enter to transmit state 3 time per second)
- struct slave *slave; // pointer to the bond slave that this port belongs to
- struct aggregator *aggregator; // pointer to an aggregator that this port related to
- struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
- u32 transaction_id; // continuous number for identification of Marker PDU's;
- struct lacpdu lacpdu; // the lacpdu that will be sent for this port
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+ u32 transaction_id; /* continuous number for identification of Marker PDU's; */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
} port_t;
-// system structure
+/* system structure */
struct ad_system {
u16 sys_priority;
struct mac_addr sys_mac_addr;
@@ -246,27 +246,26 @@ struct ad_system {
#pragma pack()
#endif
-// ================= AD Exported structures to the main bonding code ==================
+/* ========== AD Exported structures to the main bonding code ========== */
#define BOND_AD_INFO(bond) ((bond)->ad_info)
#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
struct ad_bond_info {
- struct ad_system system; /* 802.3ad system structure */
- u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
struct ad_slave_info {
- struct aggregator aggregator; // 802.3ad aggregator structure
- struct port port; // 802.3ad port structure
- spinlock_t state_machine_lock; /* mutex state machines vs.
- incoming LACPDU */
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
u16 id;
};
-// ================= AD Exported functions to the main bonding code ==================
+/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -281,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif //__BOND_3AD_H__
+#endif /* __BOND_3AD_H__ */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e8f133e926aa..9f69e818b000 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
int i;
u8 hash = 0;
- for (i = 0; i < hash_size; i++) {
+ for (i = 0; i < hash_size; i++)
hash ^= hash_start[i];
- }
return hash;
}
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
- for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
- }
_unlock_tx_hashtbl_bh(bond);
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
- if (next_index != TLB_NULL_INDEX) {
+ if (next_index != TLB_NULL_INDEX)
hash_table[next_index].prev = hash_index;
- }
slave_info->head = hash_index;
slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
}
}
- if (assigned_slave) {
+ if (assigned_slave)
hash_table[hash_index].tx_bytes += skb_len;
- }
return assigned_slave;
}
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_lock_rx_hashtbl_bh(bond);
- hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
(client_info->ip_dst == arp->ip_src) &&
(!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the clients MAC address */
- memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_src);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return;
- }
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
write_lock_bh(&bond->curr_slave_lock);
- if (slave != bond->curr_active_slave) {
+ if (slave != bond->curr_active_slave)
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
- }
write_unlock_bh(&bond->curr_slave_lock);
}
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave) {
+ if (!client_info->slave)
return;
- }
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
- if (bond_info->rlb_update_retry_counter == 0) {
+ if (bond_info->rlb_update_retry_counter == 0)
client_info->ntt = 0;
- }
}
}
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
}
}
- // update the team's flag only after the whole iteration
+ /* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
- //fasten the change
+ /* fasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
/* the entry is already assigned to this client */
if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
/* update mac address from arp */
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
assigned_slave = client_info->slave;
if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* will be updated with clients actual unicast mac address
* upon receiving an arp reply.
*/
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* rx channel
*/
tx_slave = rlb_choose_channel(skb, bond);
- if (tx_slave) {
- memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
- }
+ if (tx_slave)
+ ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
pr_debug("Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
}
/* update the team's flag only after the whole iteration */
- if (ntt) {
+ if (ntt)
bond_info->rx_ntt = 1;
- }
_unlock_rx_hashtbl_bh(bond);
}
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
_lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
- for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
- }
_unlock_rx_hashtbl_bh(bond);
@@ -1014,9 +1003,9 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
char *data;
memset(&pkt, 0, size);
- memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
- memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
- pkt.type = cpu_to_be16(ETH_P_LOOP);
+ ether_addr_copy(pkt.mac_dst, mac_addr);
+ ether_addr_copy(pkt.mac_src, mac_addr);
+ pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
skb = dev_alloc_skb(size);
if (!skb)
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[ETH_ALEN];
- memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
alb_set_slave_mac_addr(slave2, tmp_mac_addr);
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
- pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
- bond->dev->name, slave->dev->name,
- free_mac_slave->dev->name);
+ pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+ free_mac_slave->dev->name);
} else if (has_bond_addr) {
pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, slave->dev->dev_addr);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(slave->dev->dev_addr, tmp_addr);
if (res)
goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
- memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
dev_set_mac_address(rollback_slave->dev, &sa);
- memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
}
return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
int res;
res = tlb_initialize(bond);
- if (res) {
+ if (res)
return res;
- }
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
tlb_deinitialize(bond);
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
rlb_deinitialize(bond);
- }
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char*)eth_data->h_dest;
+ hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
do_tx_balance = 0;
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
- }
break;
default:
do_tx_balance = 0;
@@ -1463,23 +1449,22 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- memcpy(eth_data->h_source,
- tx_slave->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
}
bond_dev_queue_xmit(bond, skb, tx_slave->dev);
goto out;
- } else {
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
+ }
+
+ if (tx_slave) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
/* no suitable interface, frame not sent */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
out:
return NETDEV_TX_OK;
}
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
- if (bond_info->rlb_update_retry_counter) {
+ if (bond_info->rlb_update_retry_counter)
--bond_info->rlb_update_retry_counter;
- } else {
+ else
bond_info->rx_ntt = 0;
- }
}
}
}
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
- if (res) {
+ if (res)
return res;
- }
res = alb_handle_addr_collision_on_attach(bond, slave);
- if (res) {
+ if (res)
return res;
- }
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
- }
return 0;
}
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_slave(bond, slave);
- }
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
- memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
sa.sa_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
dev_set_mac_address(new_slave->dev, &sa);
- memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
struct slave *swap_slave;
int res;
- if (!is_valid_ether_addr(sa->sa_data)) {
+ if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- }
res = alb_set_mac_address(bond, addr);
- if (res) {
+ if (res)
return res;
- }
memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return 0;
- }
swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_vlan(bond, vlan_id);
- }
}
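
Most of the mechanical churn in bond_alb.c replaces memcpy(dst, src, ETH_ALEN) with ether_addr_copy(), a helper dedicated to six-byte MAC addresses which, in the kernel, also documents a u16 alignment expectation on both arguments. A hedged userspace stand-in for the contract (not the kernel implementation):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Fixed six-byte copy; the kernel helper additionally assumes both
 * addresses are at least 2-byte aligned. */
static inline void ether_addr_copy_sketch(unsigned char *dst, const unsigned char *src)
{
	memcpy(dst, src, ETH_ALEN);
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char copy[ETH_ALEN];

	ether_addr_copy_sketch(copy, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
	return 0;
}

The conversion is about readability and the fixed length, not behaviour: for suitably aligned buffers the result is the same six-byte copy.
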
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 5fc4c2351478..2d3f7fa541ff 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
debugfs_create_dir(bond->dev->name, bonding_debug_root);
if (!bond->debug_dir) {
- pr_warning("%s: Warning: failed to register to debugfs\n",
+ pr_warn("%s: Warning: failed to register to debugfs\n",
bond->dev->name);
return;
}
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
if (d) {
bond->debug_dir = d;
} else {
- pr_warning("%s: Warning: failed to reregister, "
- "so just unregister old one\n",
- bond->dev->name);
+ pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
+ bond->dev->name);
bond_debug_unregister(bond);
}
}
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
bonding_debug_root = debugfs_create_dir("bonding", NULL);
if (!bonding_debug_root) {
- pr_warning("Warning: Cannot create bonding directory"
- " in debugfs\n");
+ pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
}
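
The bond_debugfs.c hunks join format strings that had been split across source lines and switch the calls from pr_warning to pr_warn; keeping the whole message on one line makes it greppable in the sources. Purely for illustration, a tiny userspace variadic wrapper of the same printf-style shape (warn() here is made up, not a kernel macro; ##__VA_ARGS__ is the GNU extension the kernel also relies on):

#include <stdio.h>

/* Variadic front end: prefix every message, forward format and arguments. */
#define warn(fmt, ...) fprintf(stderr, "warning: " fmt, ##__VA_ARGS__)

int main(void)
{
	const char *bond = "bond0";

	/* One source line per message string, as the cleanup above prefers. */
	warn("%s: failed to reregister, so just unregister old one\n", bond);
	return 0;
}
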
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc725c3..95a6ca7d9e51 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
if (old_active) {
- memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
- memcpy(saddr.sa_data, old_active->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ ether_addr_copy(saddr.sa_data,
+ old_active->dev->dev_addr);
saddr.sa_family = new_active->dev->type;
} else {
- memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
saddr.sa_family = bond->dev->type;
}
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!old_active)
goto out;
- memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, tmp_mac);
saddr.sa_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
if (new_active) {
- new_active->jiffies = jiffies;
+ new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
}
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one.\n",
+ pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
@@ -910,7 +910,7 @@ void bond_select_active_slave(struct bonding *bond)
pr_info("%s: first active interface up!\n",
bond->dev->name);
} else {
- pr_info("%s: now running without any active interface !\n",
+ pr_info("%s: now running without any active interface!\n",
bond->dev->name);
}
}
@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
struct netpoll *np;
int err = 0;
- np = kzalloc(sizeof(*np), GFP_ATOMIC);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
err = -ENOMEM;
if (!np)
goto out;
- err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
+ err = __netpoll_setup(np, slave->dev);
if (err) {
kfree(np);
goto out;
@@ -946,14 +946,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
slave->np = NULL;
__netpoll_free_async(np);
}
-static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
-{
- if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
- return false;
- if (!slave_dev->netdev_ops->ndo_poll_controller)
- return false;
- return true;
-}
static void bond_poll_controller(struct net_device *bond_dev)
{
@@ -970,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
slave_disable_netpoll(slave);
}
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
@@ -1119,9 +1111,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- if (bond->params.arp_interval)
- slave->dev->last_rx = jiffies;
-
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
@@ -1146,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
}
return ret;
@@ -1187,13 +1176,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning("%s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
- pr_debug("Error, Device was already enslaved\n");
+ pr_debug("Error: Device was already enslaved\n");
return -EBUSY;
}
@@ -1211,9 +1200,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name,
- slave_dev->name, bond_dev->name);
+ pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1226,7 +1215,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err("%s is up. This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1270,24 +1259,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->name, slave_dev->type, bond_dev->type);
res = -EINVAL;
goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
}
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
@@ -1326,7 +1314,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
- memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1410,10 +1398,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_update_speed_duplex(new_slave);
- new_slave->last_arp_rx = jiffies -
+ new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
- new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
+ new_slave->target_last_arp_rx[i] = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1428,12 +1416,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+ bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1457,10 +1445,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (new_slave->link != BOND_LINK_DOWN)
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -1520,9 +1508,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- pr_info("Error, %s: master_dev is using netpoll, "
- "but new slave device does not support netpoll.\n",
- bond_dev->name);
+ pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
+ bond_dev->name);
res = -EBUSY;
goto err_detach;
}
@@ -1560,10 +1547,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
unblock_netpoll_tx();
}
- pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ pr_info("%s: Enslaving %s as %s interface with %s link\n",
bond_dev->name, slave_dev->name,
- bond_is_active_slave(new_slave) ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
return 0;
@@ -1603,7 +1590,7 @@ err_restore_mac:
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
- memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1648,7 +1635,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- pr_err("%s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1679,7 +1666,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
- pr_info("%s: releasing %s interface %s\n",
+ pr_info("%s: Releasing %s interface %s\n",
bond_dev->name,
bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
@@ -1692,10 +1679,10 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
if (bond->primary_slave == slave)
@@ -1736,10 +1723,10 @@ static int __bond_release_one(struct net_device *bond_dev,
eth_hw_addr_random(bond_dev);
if (vlan_uses_dev(bond_dev)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
+ bond_dev->name, bond_dev->name);
+ pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
+ bond_dev->name);
}
}
@@ -1755,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
@@ -1790,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1823,7 +1810,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("%s: destroying bond %s.\n",
+ pr_info("%s: Destroying bond %s\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
@@ -1837,9 +1824,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
info->bond_mode = bond->params.mode;
info->miimon = bond->params.miimon;
- read_lock(&bond->lock);
info->num_slaves = bond->slave_cnt;
- read_unlock(&bond->lock);
return 0;
}
@@ -1851,7 +1836,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
int i = 0, res = -ENODEV;
struct slave *slave;
- read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
@@ -1862,7 +1846,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
break;
}
}
- read_unlock(&bond->lock);
return res;
}
@@ -1892,7 +1875,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -1908,8 +1891,8 @@ static int bond_miimon_inspect(struct bonding *bond)
* recovered before downdelay expired
*/
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
- pr_info("%s: link status up again after %d ms for interface %s.\n",
+ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
@@ -1934,7 +1917,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
bond->dev->name, slave->dev->name,
ignore_updelay ? 0 :
bond->params.updelay *
@@ -1944,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info("%s: link status down again after %d ms for interface %s.\n",
+ pr_info("%s: link status down again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@@ -1983,7 +1966,7 @@ static void bond_miimon_commit(struct bonding *bond)
case BOND_LINK_UP:
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
+ slave->last_link_up = jiffies;
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
@@ -1996,7 +1979,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_backup_slave(slave);
}
- pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
bond->dev->name, slave->dev->name,
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2141,24 +2124,40 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
* switches in VLAN mode (especially if ports are configured as
* "native" to a VLAN) might not pass non-tagged frames.
*/
-static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id)
+static void bond_arp_send(struct net_device *slave_dev, int arp_op,
+ __be32 dest_ip, __be32 src_ip,
+ struct bond_vlan_tag *inner,
+ struct bond_vlan_tag *outer)
{
struct sk_buff *skb;
- pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
- slave_dev->name, &dest_ip, &src_ip, vlan_id);
+ pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
+ arp_op, slave_dev->name, &dest_ip, &src_ip);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
- pr_err("ARP packet allocation failed\n");
+ net_err_ratelimited("ARP packet allocation failed\n");
return;
}
- if (vlan_id) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+ if (outer->vlan_id) {
+ if (inner->vlan_id) {
+ pr_debug("inner tag: proto %X vid %X\n",
+ ntohs(inner->vlan_proto), inner->vlan_id);
+ skb = __vlan_put_tag(skb, inner->vlan_proto,
+ inner->vlan_id);
+ if (!skb) {
+ net_err_ratelimited("failed to insert inner VLAN tag\n");
+ return;
+ }
+ }
+
+ pr_debug("outer reg: proto %X vid %X\n",
+ ntohs(outer->vlan_proto), outer->vlan_id);
+ skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
if (!skb) {
- pr_err("failed to insert VLAN tag\n");
+ net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
}
}
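
In the bond_arp_send() hunk above, the single vlan_id argument becomes an inner/outer pair of bond_vlan_tag values; the inner 802.1Q tag is inserted first and the outer one last, so the outer tag ends up outermost on the wire, QinQ style. A userspace sketch of the resulting double-tagged header layout; the TPID values are the conventional 802.1ad/802.1Q ones, not something taken from this diff:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons */

/* On-the-wire QinQ framing: the outer tag sits right after the MAC
 * addresses, the inner tag follows it, then the original ethertype. */
struct qinq_hdr {
	uint8_t  dst[6];
	uint8_t  src[6];
	uint16_t outer_tpid;   /* conventionally 0x88A8 for 802.1ad */
	uint16_t outer_tci;    /* PCP/DEI/VID of the service tag */
	uint16_t inner_tpid;   /* conventionally 0x8100 */
	uint16_t inner_tci;    /* PCP/DEI/VID of the customer tag */
	uint16_t ethertype;    /* e.g. 0x0806 for ARP */
} __attribute__((packed));

int main(void)
{
	struct qinq_hdr h;

	memset(&h, 0, sizeof(h));
	h.outer_tpid = htons(0x88A8);
	h.outer_tci  = htons(100);      /* outer VID 100 */
	h.inner_tpid = htons(0x8100);
	h.inner_tci  = htons(42);       /* inner VID 42 */
	h.ethertype  = htons(0x0806);   /* ARP */

	printf("header size: %zu bytes\n", sizeof(h));   /* 22 */
	return 0;
}

A size of 22 bytes (14-byte Ethernet header plus two 4-byte tags) is a quick sanity check that the packing matches the wire format.
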
@@ -2171,23 +2170,32 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
struct net_device *upper, *vlan_upper;
struct list_head *iter, *vlan_iter;
struct rtable *rt;
+ struct bond_vlan_tag inner, outer;
__be32 *targets = bond->params.arp_targets, addr;
- int i, vlan_id;
+ int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
+ inner.vlan_proto = 0;
+ inner.vlan_id = 0;
+ outer.vlan_proto = 0;
+ outer.vlan_id = 0;
/* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
- pr_debug("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &targets[i]);
+ /* there's no route to target - try to send arp
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate)
+ net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
continue;
}
- vlan_id = 0;
-
/* bond device itself */
if (rt->dst.dev == bond->dev)
goto found;
@@ -2197,17 +2205,30 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* found we verify its upper dev list, searching for the
* rt->dst.dev. If found we save the tag of the vlan and
* proceed to send the packet.
- *
- * TODO: QinQ?
*/
netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
vlan_iter) {
if (!is_vlan_dev(vlan_upper))
continue;
+
+ if (vlan_upper == rt->dst.dev) {
+ outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+ outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
+ rcu_read_unlock();
+ goto found;
+ }
netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
iter) {
if (upper == rt->dst.dev) {
- vlan_id = vlan_dev_vlan_id(vlan_upper);
+ /* If the upper dev is a vlan dev too,
+ * set the vlan tag to inner tag.
+ */
+ if (is_vlan_dev(upper)) {
+ inner.vlan_proto = vlan_dev_vlan_proto(upper);
+ inner.vlan_id = vlan_dev_vlan_id(upper);
+ }
+ outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+ outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
rcu_read_unlock();
goto found;
}
@@ -2220,10 +2241,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
*/
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (upper == rt->dst.dev) {
- /* if it's a vlan - get its VID */
- if (is_vlan_dev(upper))
- vlan_id = vlan_dev_vlan_id(upper);
-
rcu_read_unlock();
goto found;
}
@@ -2242,7 +2259,7 @@ found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, vlan_id);
+ addr, &inner, &outer);
}
}
@@ -2260,7 +2277,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
pr_debug("bva: sip %pI4 not found in targets\n", &sip);
return;
}
- slave->last_arp_rx = jiffies;
+ slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
@@ -2268,17 +2285,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
+ struct slave *curr_active_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int alen;
+ int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
- if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
+ !slave_do_arp_validate_only(bond, slave))
+ slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
-
- read_lock(&bond->lock);
-
- if (!slave_do_arp_validate(bond, slave))
- goto out_unlock;
+ } else if (!is_arp) {
+ return RX_HANDLER_ANOTHER;
+ }
alen = arp_hdr_len(bond->dev);
@@ -2312,6 +2331,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
/*
* Backup slaves won't see the ARP reply, but do come through
* here for each ARP probe (so we swap the sip/tip to validate
@@ -2325,15 +2346,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
* is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
+
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
- else if (bond->curr_active_slave &&
- time_after(slave_last_rx(bond, bond->curr_active_slave),
- bond->curr_active_slave->jiffies))
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
out_unlock:
- read_unlock(&bond->lock);
if (arp != (struct arphdr *)skb->data)
kfree(arp);
return RX_HANDLER_ANOTHER;
@@ -2376,9 +2397,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
- * the picture unless it is null. also, slave->jiffies is not needed
- * here because we send an arp on each slave and give a slave as
- * long as it needs to get the tx/rx within the delta.
+ * the picture unless it is null. also, slave->last_link_up is not
+ * needed here because we send an arp on each slave and give a slave
+ * as long as it needs to get the tx/rx within the delta.
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
@@ -2387,7 +2408,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
- bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
+ bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->link = BOND_LINK_UP;
slave_state_changed = 1;
@@ -2398,7 +2419,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info("%s: link status definitely up for interface %s, ",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name,
slave->dev->name);
do_failover = 1;
@@ -2416,7 +2437,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
+ !bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
slave_state_changed = 1;
@@ -2424,9 +2445,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info("%s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down\n",
+ bond->dev->name, slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2505,7 +2525,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (bond_time_in_interval(bond, slave->jiffies, 2))
+ if (bond_time_in_interval(bond, slave->last_link_up, 2))
continue;
/*
@@ -2576,7 +2596,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
bond->current_arp_slave = NULL;
}
- pr_info("%s: link status definitely up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2682,7 +2702,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
- pr_info("%s: backup interface %s is now down.\n",
+ pr_info("%s: backup interface %s is now down\n",
bond->dev->name, slave->dev->name);
}
if (slave == curr_arp_slave)
@@ -2698,7 +2718,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
new_slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
check_state:
@@ -2879,9 +2899,9 @@ static int bond_slave_netdev_event(unsigned long event,
break;
}
- pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
- bond->dev->name, bond->primary_slave ? slave_dev->name :
- "none");
+ pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
+ bond->dev->name,
+ bond->primary_slave ? slave_dev->name : "none");
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2917,8 +2937,7 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n",
- event_dev ? event_dev->name : "None",
- event);
+ event_dev ? event_dev->name : "None", event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -2967,7 +2986,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
fk->ports = 0;
noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
- if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
return false;
iph = ip_hdr(skb);
fk->src = iph->saddr;
@@ -2976,7 +2995,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (!ip_is_fragment(iph))
proto = iph->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
return false;
iph6 = ipv6_hdr(skb);
fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
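
The bond_flow_dissect() hunks wrap the pskb_may_pull() failure checks in unlikely(), which expands to __builtin_expect() and hints to the compiler that the truncated-packet path is cold. A self-contained sketch of the same annotation; the macros mirror the usual kernel definitions, while parse_header() is a made-up example, not a bonding function:

#include <stdio.h>
#include <stddef.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical header check: fail only when the buffer is too short. */
static int parse_header(const unsigned char *buf, size_t len)
{
	if (unlikely(len < 20))    /* error path hinted as cold */
		return -1;
	return buf[9];             /* pretend byte 9 is the protocol field */
}

int main(void)
{
	unsigned char pkt[64] = { [9] = 17 };   /* 17 = UDP, say */

	printf("proto=%d short=%d\n", parse_header(pkt, sizeof(pkt)),
	       parse_header(pkt, 8));
	return 0;
}
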
@@ -3087,8 +3106,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3375,8 +3393,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
struct list_head *iter;
int res = 0;
- pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ pr_debug("bond=%p, name=%s, new_mtu=%d\n",
+ bond, bond_dev ? bond_dev->name : "None", new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -3395,8 +3413,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
bond_for_each_slave(bond, slave, iter) {
pr_debug("s %p c_m %p\n",
- slave,
- slave->dev->netdev_ops->ndo_change_mtu);
+ slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3484,15 +3501,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
*/
bond_for_each_slave(bond, slave, iter) {
- const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
-
- if (slave_ops->ndo_set_mac_address == NULL) {
- res = -EOPNOTSUPP;
- pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
- goto unwind;
- }
-
res = dev_set_mac_address(slave->dev, addr);
if (res) {
/* TODO: consider downing the slave
@@ -3568,7 +3577,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
}
}
/* no slave that can tx has been found */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
}
/**
@@ -3644,7 +3653,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3676,8 +3685,8 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
- pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
- bond_dev->name);
+ net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
+ bond_dev->name, __func__);
continue;
}
/* bond_dev_queue_xmit always returns 0 */
@@ -3687,7 +3696,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3774,7 +3783,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
pr_err("%s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
@@ -3788,14 +3797,14 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
* If we risk deadlock from transmitting this in the
* netpoll path, tell netpoll to queue the frame for later tx
*/
- if (is_netpoll_tx_blocked(dev))
+ if (unlikely(is_netpoll_tx_blocked(dev)))
return NETDEV_TX_BUSY;
rcu_read_lock();
if (bond_has_slaves(bond))
ret = __bond_start_xmit(skb, dev);
else
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rcu_read_unlock();
return ret;
@@ -3958,7 +3967,7 @@ static void bond_uninit(struct net_device *bond_dev)
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
- pr_info("%s: released all slaves\n", bond_dev->name);
+ pr_info("%s: Released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
@@ -3967,56 +3976,11 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
-{
- int i;
-
- for (i = 0; tbl[i].modename; i++)
- if (mode == tbl[i].mode)
- return tbl[i].mode;
-
- return -1;
-}
-
-static int bond_parm_tbl_lookup_name(const char *modename,
- const struct bond_parm_tbl *tbl)
-{
- int i;
-
- for (i = 0; tbl[i].modename; i++)
- if (strcmp(modename, tbl[i].modename) == 0)
- return tbl[i].mode;
-
- return -1;
-}
-
-/*
- * Convert string input module parms. Accept either the
- * number of the mode or its string name. A bit complicated because
- * some mode names are substrings of other names, and calls from sysfs
- * may have whitespace in the name (trailing newlines, for example).
- */
-int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
-{
- int modeint;
- char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
-
- for (p = (char *)buf; *p; p++)
- if (!(isdigit(*p) || isspace(*p)))
- break;
-
- if (*p && sscanf(buf, "%20s", modestr) != 0)
- return bond_parm_tbl_lookup_name(modestr, tbl);
- else if (sscanf(buf, "%d", &modeint) != 0)
- return bond_parm_tbl_lookup(modeint, tbl);
-
- return -1;
-}
-
static int bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
- struct bond_opt_value newval, *valptr;
+ struct bond_opt_value newval;
+ const struct bond_opt_value *valptr;
int arp_all_targets_value;
/*
@@ -4036,7 +4000,7 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, xmit_hash_policy);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4077,74 +4041,71 @@ static int bond_check_params(struct bond_params *params)
}
params->ad_select = valptr->value;
if (bond_mode != BOND_MODE_8023AD)
- pr_warning("ad_select param only affects 802.3ad mode\n");
+ pr_warn("ad_select param only affects 802.3ad mode\n");
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- miimon, INT_MAX);
+ pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
miimon = 0;
}
if (updelay < 0) {
- pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
- pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
- num_peer_notif);
+ pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+ num_peer_notif);
num_peer_notif = 1;
}
/* reset values for 802.3ad/TLB/ALB */
if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
- pr_warning("Forcing miimon to 100msec\n");
+ pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
+ pr_warn("Forcing miimon to 100msec\n");
miimon = BOND_DEFAULT_MIIMON;
}
}
if (tx_queues < 1 || tx_queues > 255) {
- pr_warning("Warning: tx_queues (%d) should be between "
- "1 and 255, resetting to %d\n",
- tx_queues, BOND_DEFAULT_TX_QUEUES);
+ pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
+ tx_queues, BOND_DEFAULT_TX_QUEUES);
tx_queues = BOND_DEFAULT_TX_QUEUES;
}
if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
- pr_warning("Warning: all_slaves_active module parameter (%d), "
- "not of valid value (0/1), so it was set to "
- "0\n", all_slaves_active);
+ pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
+ all_slaves_active);
all_slaves_active = 0;
}
if (resend_igmp < 0 || resend_igmp > 255) {
- pr_warning("Warning: resend_igmp (%d) should be between "
- "0 and 255, resetting to %d\n",
- resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+ pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
+ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
@@ -4165,37 +4126,36 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
- miimon, arp_interval);
+ pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon,
- (updelay / miimon) * miimon);
+ pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon, (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
- arp_interval, INT_MAX);
+ pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
arp_interval = 0;
}
@@ -4206,30 +4166,26 @@ static int bond_check_params(struct bond_params *params)
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
- pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
- arp_ip_target[i]);
+ pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[i]);
arp_interval = 0;
} else {
if (bond_get_targets_ip(arp_target, ip) == -1)
arp_target[arp_ip_count++] = ip;
else
- pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
- &ip);
+ pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+ &ip);
}
}
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
- if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("arp_validate only supported in active-backup mode\n");
- return -EINVAL;
- }
if (!arp_interval) {
pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
@@ -4271,23 +4227,23 @@ static int bond_check_params(struct bond_params *params)
arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
- pr_info(" %s", arp_ip_target[i]);
+ pr_cont(" %s", arp_ip_target[i]);
- pr_info("\n");
+ pr_cont("\n");
} else if (max_bonds) {
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4316,14 +4272,14 @@ static int bond_check_params(struct bond_params *params)
}
fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
+ pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
if (lp_interval == 0) {
- pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
- INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 70651f8e8e3b..f847e165d252 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
@@ -199,7 +199,7 @@ static int bond_changelink(struct net_device *bond_dev,
nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
__be32 target = nla_get_be32(attr);
- bond_opt_initval(&newval, target);
+ bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
&newval);
if (err)
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
i++;
}
if (i == 0 && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
if (err)
return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP validating cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 298c26509095..724e30fa20b9 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -20,7 +20,59 @@
#include <linux/inet.h>
#include "bonding.h"
-static struct bond_opt_value bond_mode_tbl[] = {
+static int bond_option_active_slave_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_miimon_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_updelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_downdelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_use_carrier_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_validate_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_primary_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_primary_reselect_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_resend_igmp_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_min_links_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_lp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_pps_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_lacp_rate_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_select_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_queue_id_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_mode_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_slaves_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+
+
+static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
{ "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
{ "balance-xor", BOND_MODE_XOR, 0},
@@ -31,13 +83,13 @@ static struct bond_opt_value bond_mode_tbl[] = {
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_pps_tbl[] = {
+static const struct bond_opt_value bond_pps_tbl[] = {
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ "maxval", USHRT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
{ "layer2", BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
{ "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
{ "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
@@ -46,85 +98,88 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_arp_validate_tbl[] = {
- { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
- { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
- { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
- { "all", BOND_ARP_VALIDATE_ALL, 0},
- { NULL, -1, 0},
+static const struct bond_opt_value bond_arp_validate_tbl[] = {
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { "filter", BOND_ARP_FILTER, 0},
+ { "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
+ { "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
+ { NULL, -1, 0},
};
-static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+static const struct bond_opt_value bond_arp_all_targets_tbl[] = {
{ "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
{ "all", BOND_ARP_TARGETS_ALL, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
{ "none", BOND_FOM_NONE, BOND_VALFLAG_DEFAULT},
{ "active", BOND_FOM_ACTIVE, 0},
{ "follow", BOND_FOM_FOLLOW, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_intmax_tbl[] = {
+static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
};
-static struct bond_opt_value bond_lacp_rate_tbl[] = {
+static const struct bond_opt_value bond_lacp_rate_tbl[] = {
{ "slow", AD_LACP_SLOW, 0},
{ "fast", AD_LACP_FAST, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_ad_select_tbl[] = {
+static const struct bond_opt_value bond_ad_select_tbl[] = {
{ "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
{ "bandwidth", BOND_AD_BANDWIDTH, 0},
{ "count", BOND_AD_COUNT, 0},
{ NULL, -1, 0},
};
-static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_primary_reselect_tbl[] = {
+static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
{ "failure", BOND_PRI_RESELECT_FAILURE, 0},
{ NULL, -1},
};
-static struct bond_opt_value bond_use_carrier_tbl[] = {
+static const struct bond_opt_value bond_use_carrier_tbl[] = {
{ "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+static const struct bond_opt_value bond_all_slaves_active_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "on", 1, 0},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_resend_igmp_tbl[] = {
+static const struct bond_opt_value bond_resend_igmp_tbl[] = {
{ "off", 0, 0},
{ "maxval", 255, BOND_VALFLAG_MAX},
{ "default", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
-static struct bond_opt_value bond_lp_interval_tbl[] = {
+static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
{ NULL, -1, 0},
};
-static struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
@@ -152,7 +207,8 @@ static struct bond_option bond_opts[] = {
.id = BOND_OPT_ARP_VALIDATE,
.name = "arp_validate",
.desc = "validate src/dst of ARP probes",
- .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
.values = bond_arp_validate_tbl,
.set = bond_option_arp_validate_set
},
@@ -312,9 +368,9 @@ static struct bond_option bond_opts[] = {
};
/* Searches for a value in opt's values[] table */
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
- struct bond_option *opt;
+ const struct bond_option *opt;
int i;
opt = bond_opt_get(option);
@@ -328,7 +384,7 @@ struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
}
/* Searches for a value in opt's values[] table which matches the flagmask */
-static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
u32 flagmask)
{
int i;
@@ -345,7 +401,7 @@ static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
*/
static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
{
- struct bond_opt_value *minval, *maxval;
+ const struct bond_opt_value *minval, *maxval;
minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
@@ -365,11 +421,12 @@ static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
* or the struct_opt_value that matched. It also strips the new line from
* @val->string if it's present.
*/
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
- struct bond_opt_value *val)
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val)
{
char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
- struct bond_opt_value *tbl, *ret = NULL;
+ const struct bond_opt_value *tbl;
+ const struct bond_opt_value *ret = NULL;
bool checkval;
int i, rv;
@@ -448,7 +505,7 @@ static int bond_opt_check_deps(struct bonding *bond,
static void bond_opt_dep_print(struct bonding *bond,
const struct bond_option *opt)
{
- struct bond_opt_value *modeval;
+ const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
@@ -461,9 +518,9 @@ static void bond_opt_dep_print(struct bonding *bond,
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val)
{
- struct bond_opt_value *minval, *maxval;
+ const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
@@ -474,10 +531,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
p = strchr(val->string, '\n');
if (p)
*p = '\0';
- pr_err("%s: option %s: invalid value (%s).\n",
+ pr_err("%s: option %s: invalid value (%s)\n",
bond->dev->name, opt->name, val->string);
} else {
- pr_err("%s: option %s: invalid value (%llu).\n",
+ pr_err("%s: option %s: invalid value (%llu)\n",
bond->dev->name, opt->name, val->value);
}
}
@@ -485,7 +542,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval)
break;
- pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ pr_err("%s: option %s: allowed values %llu - %llu\n",
bond->dev->name, opt->name, minval ? minval->value : 0,
maxval->value);
break;
@@ -493,11 +550,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
bond_opt_dep_print(bond, opt);
break;
case -ENOTEMPTY:
- pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ pr_err("%s: option %s: unable to set because the bond device has slaves\n",
bond->dev->name, opt->name);
break;
case -EBUSY:
- pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ pr_err("%s: option %s: unable to set because the bond device is up\n",
bond->dev->name, opt->name);
break;
default:
@@ -518,7 +575,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
int __bond_opt_set(struct bonding *bond,
unsigned int option, struct bond_opt_value *val)
{
- struct bond_opt_value *retval = NULL;
+ const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
int ret = -ENOENT;
@@ -573,7 +630,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
* This function checks if option is valid and if so returns a pointer
* to its entry in the bond_opts[] option array.
*/
-struct bond_option *bond_opt_get(unsigned int option)
+const struct bond_option *bond_opt_get(unsigned int option)
{
if (!BOND_OPT_VALID(option))
return NULL;
@@ -581,7 +638,7 @@ struct bond_option *bond_opt_get(unsigned int option)
return &bond_opts[option];
}
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
{
if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -590,7 +647,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
- pr_info("%s: Setting MII monitoring interval to %d.\n",
+ pr_info("%s: Setting MII monitoring interval to %d\n",
bond->dev->name, bond->params.miimon);
}
@@ -619,8 +676,8 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
return __bond_option_active_slave_get(bond, bond->curr_active_slave);
}
-int bond_option_active_slave_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_active_slave_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char ifname[IFNAMSIZ] = { 0, };
struct net_device *slave_dev;
@@ -637,13 +694,13 @@ int bond_option_active_slave_set(struct bonding *bond,
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
- pr_err("Device %s is not bonding slave.\n",
+ pr_err("Device %s is not bonding slave\n",
slave_dev->name);
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
- pr_err("%s: Device %s is not our slave.\n",
+ pr_err("%s: Device %s is not our slave\n",
bond->dev->name, slave_dev->name);
return -EINVAL;
}
@@ -654,9 +711,8 @@ int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
- rcu_assign_pointer(bond->curr_active_slave, NULL);
+ pr_info("%s: Clearing current active slave\n", bond->dev->name);
+ RCU_INIT_POINTER(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = bond->curr_active_slave;
@@ -666,16 +722,16 @@ int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ pr_info("%s: %s is already the current active slave\n",
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
+ pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
} else {
- pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
bond->dev->name, new_active->dev->name,
new_active->dev->name);
ret = -EINVAL;
@@ -689,21 +745,22 @@ int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_miimon_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ pr_info("%s: Setting MII monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
@@ -726,7 +783,8 @@ int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
return 0;
}
-int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_updelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int value = newval->value;
@@ -743,15 +801,14 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.miimon);
}
bond->params.updelay = value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Setting up delay to %d\n",
+ bond->dev->name, bond->params.updelay * bond->params.miimon);
return 0;
}
-int bond_option_downdelay_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_downdelay_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int value = newval->value;
@@ -768,37 +825,36 @@ int bond_option_downdelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.downdelay = value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d\n",
+ bond->dev->name, bond->params.downdelay * bond->params.miimon);
return 0;
}
-int bond_option_use_carrier_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_use_carrier_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting use_carrier to %llu.\n",
+ pr_info("%s: Setting use_carrier to %llu\n",
bond->dev->name, newval->value);
bond->params.use_carrier = newval->value;
return 0;
}
-int bond_option_arp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ pr_info("%s: Setting ARP monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
@@ -813,8 +869,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -857,19 +912,18 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, 0); /* first free slot */
if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
+ pr_err("%s: ARP target table is full!\n", bond->dev->name);
return -EINVAL;
}
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+ pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
return 0;
}
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
{
int ret;
@@ -881,7 +935,7 @@ int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
return ret;
}
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
{
__be32 *targets = bond->params.arp_targets;
struct list_head *iter;
@@ -897,17 +951,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, target);
if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
bond->dev->name, &target);
return -EINVAL;
}
if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &target);
+ pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
@@ -938,8 +991,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond)
write_unlock_bh(&bond->lock);
}
-int bond_option_arp_ip_targets_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
int ret = -EPERM;
__be32 target;
@@ -955,7 +1008,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
else if (newval->string[0] == '-')
ret = bond_option_arp_ip_target_rem(bond, target);
else
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
bond->dev->name);
} else {
target = newval->value;
@@ -965,10 +1018,10 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
-int bond_option_arp_validate_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_validate_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_validate to %s (%llu).\n",
+ pr_info("%s: Setting arp_validate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
if (bond->dev->flags & IFF_UP) {
@@ -982,17 +1035,18 @@ int bond_option_arp_validate_set(struct bonding *bond,
return 0;
}
-int bond_option_arp_all_targets_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
return 0;
}
-int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_primary_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char *p, *primary = newval->string;
struct list_head *iter;
@@ -1007,8 +1061,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
@@ -1017,7 +1070,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
+ pr_info("%s: Setting %s as primary slave\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
@@ -1027,15 +1080,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
}
if (bond->primary_slave) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
}
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
bond->dev->name, primary, bond->dev->name);
out:
@@ -1046,10 +1098,10 @@ out:
return 0;
}
-int bond_option_primary_reselect_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_primary_reselect_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ pr_info("%s: Setting primary_reselect to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.primary_reselect = newval->value;
@@ -1062,46 +1114,46 @@ int bond_option_primary_reselect_set(struct bonding *bond,
return 0;
}
-int bond_option_fail_over_mac_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
return 0;
}
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.xmit_policy = newval->value;
return 0;
}
-int bond_option_resend_igmp_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_resend_igmp_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting resend_igmp to %llu.\n",
+ pr_info("%s: Setting resend_igmp to %llu\n",
bond->dev->name, newval->value);
bond->params.resend_igmp = newval->value;
return 0;
}
-int bond_option_num_peer_notif_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.num_peer_notif = newval->value;
return 0;
}
-int bond_option_all_slaves_active_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
struct list_head *iter;
struct slave *slave;
@@ -1121,8 +1173,8 @@ int bond_option_all_slaves_active_set(struct bonding *bond,
return 0;
}
-int bond_option_min_links_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_min_links_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
pr_info("%s: Setting min links value to %llu\n",
bond->dev->name, newval->value);
@@ -1131,15 +1183,16 @@ int bond_option_min_links_set(struct bonding *bond,
return 0;
}
-int bond_option_lp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_lp_interval_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.lp_interval = newval->value;
return 0;
}
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_pps_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
bond->params.packets_per_slave = newval->value;
if (newval->value > 0) {
@@ -1156,10 +1209,10 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
return 0;
}
-int bond_option_lacp_rate_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_lacp_rate_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ pr_info("%s: Setting LACP rate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
@@ -1167,18 +1220,18 @@ int bond_option_lacp_rate_set(struct bonding *bond,
return 0;
}
-int bond_option_ad_select_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_ad_select_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
- pr_info("%s: Setting ad_select to %s (%llu).\n",
+ pr_info("%s: Setting ad_select to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.ad_select = newval->value;
return 0;
}
-int bond_option_queue_id_set(struct bonding *bond,
- struct bond_opt_value *newval)
+static int bond_option_queue_id_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
struct slave *slave, *update_slave;
struct net_device *sdev;
@@ -1200,8 +1253,7 @@ int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Check buffer length, valid ifname and queue id */
- if (strlen(newval->string) > IFNAMSIZ ||
- !dev_valid_name(newval->string) ||
+ if (!dev_valid_name(newval->string) ||
qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
@@ -1233,14 +1285,14 @@ out:
return ret;
err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
+ pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
ret = -EPERM;
goto out;
}
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_slaves_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
{
char command[IFNAMSIZ + 1] = { 0, };
struct net_device *dev;
@@ -1255,7 +1307,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
+ pr_info("%s: interface %s does not exist!\n",
bond->dev->name, ifname);
ret = -ENODEV;
goto out;
@@ -1263,12 +1315,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
switch (command[0]) {
case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
ret = bond_enslave(bond->dev, dev);
break;
case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
ret = bond_release(bond->dev, dev);
break;
@@ -1280,7 +1332,7 @@ out:
return ret;
err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
bond->dev->name);
ret = -EPERM;
goto out;
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 433d37f6940b..12be9e1bfb0c 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -81,8 +81,8 @@ struct bonding;
struct bond_option {
int id;
- char *name;
- char *desc;
+ const char *name;
+ const char *desc;
u32 flags;
/* unsuppmodes is used to denote modes in which the option isn't
@@ -92,18 +92,19 @@ struct bond_option {
/* supported values which this option can have, can be a subset of
* BOND_OPTVAL_RANGE's value range
*/
- struct bond_opt_value *values;
+ const struct bond_opt_value *values;
- int (*set)(struct bonding *bond, struct bond_opt_value *val);
+ int (*set)(struct bonding *bond, const struct bond_opt_value *val);
};
int __bond_opt_set(struct bonding *bond, unsigned int option,
struct bond_opt_value *val);
int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
- struct bond_opt_value *val);
-struct bond_option *bond_opt_get(unsigned int option);
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+ struct bond_opt_value *val);
+const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
* passing. There should be either a valid string or value, but not both.
@@ -122,49 +123,6 @@ static inline void __bond_opt_init(struct bond_opt_value *optval,
#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_validate_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_all_targets_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_fail_over_mac_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_arp_ip_targets_set(struct bonding *bond,
- struct bond_opt_value *newval);
void bond_option_arp_ip_targets_clear(struct bonding *bond);
-int bond_option_downdelay_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_updelay_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_lacp_rate_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_min_links_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_ad_select_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_num_peer_notif_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_primary_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_primary_reselect_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_use_carrier_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_active_slave_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_queue_id_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_all_slaves_active_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_resend_igmp_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_lp_interval_set(struct bonding *bond,
- struct bond_opt_value *newval);
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+
#endif /* _BOND_OPTIONS_H */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3ac20e78eafc..013fdd0f45e9 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -65,13 +65,11 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
struct bonding *bond = seq->private;
- struct bond_opt_value *optval;
+ const struct bond_opt_value *optval;
struct slave *curr;
int i;
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
+ curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(bond->params.mode));
@@ -254,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -281,8 +279,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning("Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warn("Warning: Cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 643fcc110299..0e8b268da0a0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
- pr_info("%s already exists.\n", ifname);
+ pr_info("%s already exists\n", ifname);
else
- pr_info("%s creation failed.\n", ifname);
+ pr_info("%s creation failed\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
return -EPERM;
}
@@ -220,7 +220,7 @@ static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
@@ -251,7 +251,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
@@ -282,7 +282,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
@@ -314,7 +314,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
@@ -348,7 +348,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
@@ -505,7 +505,7 @@ static ssize_t bonding_show_lacp(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
@@ -558,7 +558,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
@@ -686,7 +686,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- struct bond_opt_value *val;
+ const struct bond_opt_value *val;
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already exists in sysfs",
+ pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2b0fdec695f7..b8bdd0acc8f3 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -188,8 +188,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- unsigned long jiffies;
- unsigned long last_arp_rx;
+ /* all three in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
@@ -265,6 +266,11 @@ struct bonding {
#define bond_slave_get_rtnl(dev) \
((struct slave *) rtnl_dereference(dev->rx_handler_data))
+struct bond_vlan_tag {
+ __be16 vlan_proto;
+ unsigned short vlan_id;
+};
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
@@ -292,7 +298,7 @@ static inline void bond_set_active_slave(struct slave *slave)
{
if (slave->backup) {
slave->backup = 0;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -300,7 +306,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
{
if (!slave->backup) {
slave->backup = 1;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
}
}
@@ -312,7 +318,7 @@ static inline void bond_set_slave_state(struct slave *slave,
slave->backup = slave_state;
if (notify) {
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
slave->should_notify = 0;
} else {
if (slave->should_notify)
@@ -342,7 +348,7 @@ static inline void bond_slave_state_notify(struct bonding *bond)
bond_for_each_slave(bond, tmp, iter) {
if (tmp->should_notify) {
- rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
tmp->should_notify = 0;
}
}
@@ -374,6 +380,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
#define BOND_SLAVE_NOTIFY_NOW true
#define BOND_SLAVE_NOTIFY_LATER false
@@ -384,6 +395,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
+static inline int slave_do_arp_validate_only(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -403,14 +420,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
static inline unsigned long slave_last_rx(struct bonding *bond,
struct slave *slave)
{
- if (slave_do_arp_validate(bond, slave)) {
- if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
- return slave_oldest_target_arp_rx(bond, slave);
- else
- return slave->last_arp_rx;
- }
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
- return slave->dev->last_rx;
+ return slave->last_rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -487,8 +500,6 @@ void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
-int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -501,8 +512,6 @@ void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
void bond_netlink_fini(void);
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 88a6a5810ec6..fc73865bb83a 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = ser->dev;
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 155db68e13ba..ff54c0eb2052 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = cfspi->ndev;
/*
* Push received packet up the stack.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6efe27458116..f07fa89b5fd5 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
at91_transceiver_switch(priv, 1);
/* enable chip */
- at91_write(priv, AT91_MR, AT91_MR_CANEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
+ else
+ reg_mr = AT91_MR_CANEN;
+ at91_write(priv, AT91_MR, reg_mr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1190,6 +1194,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_open = at91_open,
.ndo_stop = at91_close,
.ndo_start_xmit = at91_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
@@ -1341,7 +1346,8 @@ static int at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 8d2b89a12e09..543ecceb33e9 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -528,6 +528,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
.ndo_open = bfin_can_open,
.ndo_stop = bfin_can_close,
.ndo_start_xmit = bfin_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int bfin_can_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 951bfede8f3d..a5c8dcfa8357 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -114,6 +114,14 @@
IF_COMM_CONTROL | IF_COMM_TXRQST | \
IF_COMM_DATAA | IF_COMM_DATAB)
+/* For the low buffers we clear the interrupt bit, but keep newdat */
+#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
+ IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* For the high buffers we clear the interrupt bit and newdat */
+#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+
/* IFx arbitration */
#define IF_ARB_MSGVAL BIT(15)
#define IF_ARB_MSGXTD BIT(14)
@@ -122,7 +130,6 @@
/* IFx message control */
#define IF_MCONT_NEWDAT BIT(15)
#define IF_MCONT_MSGLST BIT(14)
-#define IF_MCONT_CLR_MSGLST (0 << 14)
#define IF_MCONT_INTPND BIT(13)
#define IF_MCONT_UMASK BIT(12)
#define IF_MCONT_TXIE BIT(11)
@@ -133,31 +140,10 @@
#define IF_MCONT_DLC_MASK 0xf
/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
+ * Use IF1 for RX and IF2 for TX
*/
-#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
-
-/* message object split */
-#define C_CAN_NO_OF_OBJECTS 32
-#define C_CAN_MSG_OBJ_RX_NUM 16
-#define C_CAN_MSG_OBJ_TX_NUM 16
-
-#define C_CAN_MSG_OBJ_RX_FIRST 1
-#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
- C_CAN_MSG_OBJ_RX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
-#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
- C_CAN_MSG_OBJ_TX_NUM - 1)
-
-#define C_CAN_MSG_OBJ_RX_SPLIT 9
-#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
-#define RECEIVE_OBJECT_BITS 0x0000ffff
+#define IF_RX 0
+#define IF_TX 1
/* status interrupt */
#define STATUS_INTERRUPT 0x8000
@@ -246,10 +232,9 @@ static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
C_CAN_MSG_OBJ_TX_FIRST;
}
-static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+static inline int get_tx_echo_msg_obj(int txecho)
{
- return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
- C_CAN_MSG_OBJ_TX_FIRST;
+ return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}
static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
@@ -366,18 +351,6 @@ static void c_can_write_msg_object(struct net_device *dev,
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}
-static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
- int iface, int ctrl_mask,
- int obj)
-{
- struct c_can_priv *priv = netdev_priv(dev);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
- c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
-
-}
-
static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
int iface,
int ctrl_mask)
@@ -387,45 +360,27 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST |
- IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ ctrl_mask & ~IF_MCONT_NEWDAT);
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
}
}
-static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
- int iface, int ctrl_mask,
- int obj)
-{
- struct c_can_priv *priv = netdev_priv(dev);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~(IF_MCONT_MSGLST |
- IF_MCONT_INTPND | IF_MCONT_NEWDAT));
- c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
-}
-
-static void c_can_handle_lost_msg_obj(struct net_device *dev,
- int iface, int objno)
+static int c_can_handle_lost_msg_obj(struct net_device *dev,
+ int iface, int objno, u32 ctrl)
{
- struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
+ struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame;
+ struct sk_buff *skb;
- netdev_err(dev, "msg lost in buffer %d\n", objno);
-
- c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- IF_MCONT_CLR_MSGLST);
-
- c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+ ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
+ c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
/* create an error msg */
skb = alloc_can_err_skb(dev, &frame);
if (unlikely(!skb))
- return;
+ return 0;
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -433,6 +388,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
stats->rx_over_errors++;
netif_receive_skb(skb);
+ return 1;
}
static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
@@ -477,9 +433,6 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
-
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return 0;
}
@@ -548,10 +501,12 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
+ spin_lock_bh(&priv->xmit_lock);
msg_obj_no = get_tx_next_msg_obj(priv);
/* prepare message object for transmission */
- c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+ c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
+ priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
/*
@@ -562,10 +517,26 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
netif_stop_queue(dev);
+ spin_unlock_bh(&priv->xmit_lock);
return NETDEV_TX_OK;
}
+static int c_can_wait_for_ctrl_init(struct net_device *dev,
+ struct c_can_priv *priv, u32 init)
+{
+ int retry = 0;
+
+ while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
+ udelay(10);
+ if (retry++ > 1000) {
+ netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
static int c_can_set_bittiming(struct net_device *dev)
{
unsigned int reg_btr, reg_brpe, ctrl_save;
@@ -573,6 +544,7 @@ static int c_can_set_bittiming(struct net_device *dev)
u32 ten_bit_brp;
struct c_can_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
+ int res;
/* c_can provides a 6-bit brp and 4-bit brpe fields */
ten_bit_brp = bt->brp - 1;
@@ -590,13 +562,17 @@ static int c_can_set_bittiming(struct net_device *dev)
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
- priv->write_reg(priv, C_CAN_CTRL_REG,
- ctrl_save | CONTROL_CCE | CONTROL_INIT);
+ ctrl_save &= ~CONTROL_INIT;
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
+ res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
+ if (res)
+ return res;
+
priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
- return 0;
+ return c_can_wait_for_ctrl_init(dev, priv, 0);
}
/*
@@ -614,14 +590,14 @@ static void c_can_configure_msg_objects(struct net_device *dev)
/* first invalidate all message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
- c_can_inval_msg_object(dev, 0, i);
+ c_can_inval_msg_object(dev, IF_RX, i);
/* setup receive message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
- c_can_setup_receive_object(dev, 0, i, 0, 0,
+ c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
(IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
- c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
}
@@ -631,7 +607,7 @@ static void c_can_configure_msg_objects(struct net_device *dev)
* - set operating mode
* - configure message objects
*/
-static void c_can_chip_config(struct net_device *dev)
+static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
@@ -668,15 +644,18 @@ static void c_can_chip_config(struct net_device *dev)
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* set bittiming params */
- c_can_set_bittiming(dev);
+ return c_can_set_bittiming(dev);
}
-static void c_can_start(struct net_device *dev)
+static int c_can_start(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ int err;
/* basic c_can configuration */
- c_can_chip_config(dev);
+ err = c_can_chip_config(dev);
+ if (err)
+ return err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -685,6 +664,8 @@ static void c_can_start(struct net_device *dev)
/* enable status change, error and module interrupts */
c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+ return 0;
}
static void c_can_stop(struct net_device *dev)
@@ -700,9 +681,13 @@ static void c_can_stop(struct net_device *dev)
static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
+ int err;
+
switch (mode) {
case CAN_MODE_START:
- c_can_start(dev);
+ err = c_can_start(dev);
+ if (err)
+ return err;
netif_wake_queue(dev);
break;
default:
@@ -740,8 +725,6 @@ static int c_can_get_berr_counter(const struct net_device *dev,
}
/*
- * theory of operation:
- *
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
* complete IRQ.
@@ -752,33 +735,113 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*/
static void c_can_do_tx(struct net_device *dev)
{
- u32 val;
- u32 msg_obj_no;
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
+ u32 val, obj, pkts = 0, bytes = 0;
- for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
- msg_obj_no = get_tx_echo_msg_obj(priv);
+ spin_lock_bh(&priv->xmit_lock);
+
+ for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ obj = get_tx_echo_msg_obj(priv->tx_echo);
val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
- if (!(val & (1 << (msg_obj_no - 1)))) {
- can_get_echo_skb(dev,
- msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
- c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
- stats->tx_bytes += priv->read_reg(priv,
- C_CAN_IFACE(MSGCTRL_REG, 0))
- & IF_MCONT_DLC_MASK;
- stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
- c_can_inval_msg_object(dev, 0, msg_obj_no);
- } else {
+
+ if (val & (1 << (obj - 1)))
break;
- }
+
+ can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
+ bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+ pkts++;
+ c_can_inval_msg_object(dev, IF_TX, obj);
}
/* restart queue if wrap-up or if queue stalled on last pkt */
if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
netif_wake_queue(dev);
+
+ spin_unlock_bh(&priv->xmit_lock);
+
+ if (pkts) {
+ stats->tx_bytes += bytes;
+ stats->tx_packets += pkts;
+ can_led_event(dev, CAN_LED_EVENT_TX);
+ }
+}
+
+/*
+ * If we have a gap in the pending bits, that means we either
+ * raced with the hardware or failed to readout all upper
+ * objects in the last run due to quota limit.
+ */
+static u32 c_can_adjust_pending(u32 pend)
+{
+ u32 weight, lasts;
+
+ if (pend == RECEIVE_OBJECT_BITS)
+ return pend;
+
+ /*
+ * If the last set bit is larger than the number of pending
+ * bits we have a gap.
+ */
+ weight = hweight32(pend);
+ lasts = fls(pend);
+
+ /* If the bits are linear, nothing to do */
+ if (lasts == weight)
+ return pend;
+
+ /*
+ * Find the first set bit after the gap. We walk backwards
+ * from the last set bit.
+ */
+ for (lasts--; pend & (1 << (lasts - 1)); lasts--);
+
+ return pend & ~((1 << lasts) - 1);
+}
+
+static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
+ u32 pend, int quota)
+{
+ u32 pkts = 0, ctrl, obj, mcmd;
+
+ while ((obj = ffs(pend)) && quota > 0) {
+ pend &= ~BIT(obj - 1);
+
+ mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
+ IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
+
+ c_can_object_get(dev, IF_RX, obj, mcmd);
+ ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
+
+ if (ctrl & IF_MCONT_MSGLST) {
+ int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
+
+ pkts += n;
+ quota -= n;
+ continue;
+ }
+
+ /*
+ * This really should not happen, but this covers some
+ * odd HW behaviour. Do not remove that unless you
+ * want to brick your machine.
+ */
+ if (!(ctrl & IF_MCONT_NEWDAT))
+ continue;
+
+ /* read the data from the message object */
+ c_can_read_msg_object(dev, IF_RX, ctrl);
+
+ if (obj == C_CAN_MSG_RX_LOW_LAST)
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+
+ pkts++;
+ quota--;
+ }
+
+ return pkts;
}
/*
@@ -805,10 +868,8 @@ static void c_can_do_tx(struct net_device *dev)
*/
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
- u32 num_rx_pkts = 0;
- unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u16 val;
+ u32 pkts = 0, pend = 0, toread, n;
/*
* It is faster to read only one 16bit register. This is only possible
@@ -817,49 +878,31 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
"Implementation does not support more message objects than 16");
- while (quota > 0 && (val = priv->read_reg(priv, C_CAN_INTPND1_REG))) {
- while ((msg_obj = ffs(val)) && quota > 0) {
- val &= ~BIT(msg_obj - 1);
-
- c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
- ~IF_COMM_TXRQST);
- msg_ctrl_save = priv->read_reg(priv,
- C_CAN_IFACE(MSGCTRL_REG, 0));
-
- if (msg_ctrl_save & IF_MCONT_MSGLST) {
- c_can_handle_lost_msg_obj(dev, 0, msg_obj);
- num_rx_pkts++;
- quota--;
- continue;
- }
-
- if (msg_ctrl_save & IF_MCONT_EOB)
- return num_rx_pkts;
-
- if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
- continue;
-
- /* read the data from the message object */
- c_can_read_msg_object(dev, 0, msg_ctrl_save);
-
- if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
- c_can_mark_rx_msg_obj(dev, 0,
- msg_ctrl_save, msg_obj);
- else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
- /* activate this msg obj */
- c_can_activate_rx_msg_obj(dev, 0,
- msg_ctrl_save, msg_obj);
- else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev,
- 0, msg_ctrl_save);
-
- num_rx_pkts++;
- quota--;
+ while (quota > 0) {
+ if (!pend) {
+ pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+ if (!pend)
+ break;
+ /*
+ * If the pending field has a gap, handle the
+ * bits above the gap first.
+ */
+ toread = c_can_adjust_pending(pend);
+ } else {
+ toread = pend;
}
+ /* Remove the bits from pend */
+ pend &= ~toread;
+ /* Read the objects */
+ n = c_can_read_objects(dev, priv, toread, quota);
+ pkts += n;
+ quota -= n;
}
- return num_rx_pkts;
+ if (pkts)
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
+ return pkts;
}
static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
@@ -1133,17 +1176,20 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}
- napi_enable(&priv->napi);
+ /* start the c_can controller */
+ err = c_can_start(dev);
+ if (err)
+ goto exit_start_fail;
can_led_event(dev, CAN_LED_EVENT_OPEN);
- /* start the c_can controller */
- c_can_start(dev);
-
+ napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ free_irq(dev->irq, dev);
exit_irq_fail:
close_candev(dev);
exit_open_fail:
@@ -1180,6 +1226,7 @@ struct net_device *alloc_c_can_dev(void)
return NULL;
priv = netdev_priv(dev);
+ spin_lock_init(&priv->xmit_lock);
netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
priv->dev = dev;
@@ -1260,15 +1307,16 @@ int c_can_power_up(struct net_device *dev)
if (time_after(jiffies, time_out))
return -ETIMEDOUT;
- c_can_start(dev);
-
- return 0;
+ return c_can_start(dev);
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif
void free_c_can_dev(struct net_device *dev)
{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ netif_napi_del(&priv->napi);
free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);
@@ -1277,6 +1325,7 @@ static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_c_can_dev(struct net_device *dev)
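For illustration, the gap handling in the new c_can_adjust_pending() above is easiest to follow with a concrete bit pattern. The sketch below is a standalone userspace model (assumed helper name; __builtin_popcount()/__builtin_clz() stand in for the kernel's hweight32()/fls(), and 0x0000ffff is RECEIVE_OBJECT_BITS from the header hunk below). With RX objects 1..16 mapped to bits 0..15, a pattern such as 0x00f3 has a hole between bits 1 and 4, so only the bits above the hole (0x00f0) are kept and those leftover objects are read first, as described in the comment.

#include <stdint.h>
#include <stdio.h>

/* standalone model of c_can_adjust_pending(); the caller guarantees
 * pend != 0, as in the driver
 */
static uint32_t adjust_pending(uint32_t pend)
{
        unsigned int weight, lasts;

        if (pend == 0x0000ffff)                 /* RECEIVE_OBJECT_BITS */
                return pend;

        weight = __builtin_popcount(pend);      /* hweight32() */
        lasts = 32 - __builtin_clz(pend);       /* fls() */

        /* bits are linear when the last set bit equals the bit count */
        if (lasts == weight)
                return pend;

        /* walk back from the last set bit to the first bit after the gap */
        for (lasts--; pend & (1u << (lasts - 1)); lasts--)
                ;

        return pend & ~((1u << lasts) - 1);
}

int main(void)
{
        /* objects 1, 2 and 5..8 pending -> only 5..8 (0x00f0) are kept */
        printf("0x%04x\n", adjust_pending(0x00f3));
        return 0;
}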
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d2e1c21b143f..faa8404162b3 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,6 +22,33 @@
#ifndef C_CAN_H
#define C_CAN_H
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
+
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS 32
+#define C_CAN_MSG_OBJ_RX_NUM 16
+#define C_CAN_MSG_OBJ_TX_NUM 16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
+ C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
+ C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS 0x0000ffff
+
enum reg {
C_CAN_CTRL_REG = 0,
C_CAN_CTRL_EX_REG,
@@ -156,6 +183,7 @@ struct c_can_priv {
struct napi_struct napi;
struct net_device *dev;
struct device *device;
+ spinlock_t xmit_lock;
int tx_object;
int current_status;
int last_status;
@@ -172,6 +200,7 @@ struct c_can_priv {
u32 __iomem *raminit_ctrlreg;
unsigned int instance;
void (*raminit) (const struct c_can_priv *priv, bool enable);
+ u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
};
struct net_device *alloc_c_can_dev(void);
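As a quick cross-check of the message-object layout defined above (a hypothetical standalone test, not kernel code): RX objects occupy 1..16 with the low/high split after object 8, TX objects occupy 17..32, and the IFX_WRITE_*_16BIT() helpers split a 32-bit value into the two 16-bit register halves.

#include <assert.h>

#define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)

#define C_CAN_MSG_OBJ_RX_NUM    16
#define C_CAN_MSG_OBJ_TX_NUM    16
#define C_CAN_MSG_OBJ_RX_FIRST  1
#define C_CAN_MSG_OBJ_RX_LAST   (C_CAN_MSG_OBJ_RX_FIRST + C_CAN_MSG_OBJ_RX_NUM - 1)
#define C_CAN_MSG_OBJ_TX_FIRST  (C_CAN_MSG_OBJ_RX_LAST + 1)
#define C_CAN_MSG_OBJ_TX_LAST   (C_CAN_MSG_OBJ_TX_FIRST + C_CAN_MSG_OBJ_TX_NUM - 1)
#define C_CAN_MSG_OBJ_RX_SPLIT  9
#define C_CAN_MSG_RX_LOW_LAST   (C_CAN_MSG_OBJ_RX_SPLIT - 1)

int main(void)
{
        /* RX objects 1..16, "low" group 1..8, TX objects 17..32 */
        assert(C_CAN_MSG_OBJ_RX_LAST == 16);
        assert(C_CAN_MSG_RX_LOW_LAST == 8);
        assert(C_CAN_MSG_OBJ_TX_FIRST == 17 && C_CAN_MSG_OBJ_TX_LAST == 32);

        /* a 32-bit arbitration/mask value is split into two 16-bit writes */
        assert(IFX_WRITE_LOW_16BIT(0x12345678) == 0x5678);
        assert(IFX_WRITE_HIGH_16BIT(0x12345678) == 0x1234);
        return 0;
}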
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index d66ac265269c..806d92753427 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -37,8 +37,10 @@
#include "c_can.h"
-#define CAN_RAMINIT_START_MASK(i) (1 << (i))
-
+#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
+#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
+#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
+static DEFINE_SPINLOCK(raminit_lock);
/*
* 16-bit c_can registers can be arranged differently in the memory
* architecture of different implementations. For example: 16-bit
@@ -69,16 +71,41 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+ u32 val)
+{
+ /* We look only at the bits of our instance. */
+ val &= mask;
+ while ((readl(priv->raminit_ctrlreg) & mask) != val)
+ udelay(1);
+}
+
static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
{
- u32 val;
-
- val = readl(priv->raminit_ctrlreg);
- if (enable)
- val |= CAN_RAMINIT_START_MASK(priv->instance);
- else
- val &= ~CAN_RAMINIT_START_MASK(priv->instance);
- writel(val, priv->raminit_ctrlreg);
+ u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
+ u32 ctrl;
+
+ spin_lock(&raminit_lock);
+
+ ctrl = readl(priv->raminit_ctrlreg);
+ /* We clear the done and start bit first. The start bit
+ * triggers on the 0 -> 1 transition and is not self-clearing,
+ * and the init done bit is cleared as well.
+ */
+ ctrl &= ~CAN_RAMINIT_START_MASK(priv->instance);
+ ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
+ writel(ctrl, priv->raminit_ctrlreg);
+ ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
+ c_can_hw_raminit_wait(priv, ctrl, mask);
+
+ if (enable) {
+ /* Set start bit and wait for the done bit. */
+ ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
+ writel(ctrl, priv->raminit_ctrlreg);
+ ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
+ c_can_hw_raminit_wait(priv, ctrl, mask);
+ }
+ spin_unlock(&raminit_lock);
}
static struct platform_device_id c_can_id_table[] = {
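The new raminit masks above pack the start bits of all instances into the low byte of the control register and the matching done bits into the high byte, so each per-instance mask is just the 0x001/0x100/0x101 pattern shifted by the instance number. A small sanity check (standalone, instance numbers assumed):

#include <assert.h>

#define CAN_RAMINIT_START_MASK(i)       (0x001 << (i))
#define CAN_RAMINIT_DONE_MASK(i)        (0x100 << (i))
#define CAN_RAMINIT_ALL_MASK(i)         (0x101 << (i))

int main(void)
{
        /* instance 0: start bit 0, done bit 8 */
        assert(CAN_RAMINIT_ALL_MASK(0) == 0x101);
        /* instance 1: start bit 1, done bit 9 */
        assert(CAN_RAMINIT_START_MASK(1) == 0x002);
        assert(CAN_RAMINIT_DONE_MASK(1) == 0x200);
        assert(CAN_RAMINIT_ALL_MASK(1) == 0x202);
        return 0;
}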
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 0f12abf6591c..d8379278d648 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -823,6 +823,7 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_open = cc770_open,
.ndo_stop = cc770_close,
.ndo_start_xmit = cc770_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_cc770dev(struct net_device *dev)
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index fc59bc6f040b..c7a260478749 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -99,10 +99,10 @@ static int can_update_spt(const struct can_bittiming_const *btc,
return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
}
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming_const *btc = priv->bittiming_const;
long rate, best_rate = 0;
long best_error = 1000000000, error = 0;
int best_tseg = 0, best_brp = 0, brp = 0;
@@ -110,9 +110,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
int spt_error = 1000, spt = 0, sampl_pt;
u64 v64;
- if (!priv->bittiming_const)
- return -ENOTSUPP;
-
/* Use CIA recommended sample points */
if (bt->sample_point) {
sampl_pt = bt->sample_point;
@@ -204,7 +201,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
return 0;
}
#else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
netdev_err(dev, "bit-timing calculation not available\n");
return -EINVAL;
@@ -217,16 +215,13 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming_const *btc = priv->bittiming_const;
int tseg1, alltseg;
u64 brp64;
- if (!priv->bittiming_const)
- return -ENOTSUPP;
-
tseg1 = bt->prop_seg + bt->phase_seg1;
if (!bt->sjw)
bt->sjw = 1;
@@ -254,26 +249,29 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
return 0;
}
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
{
- struct can_priv *priv = netdev_priv(dev);
int err;
/* Check if the CAN device has bit-timing parameters */
- if (priv->bittiming_const) {
+ if (!btc)
+ return -ENOTSUPP;
- /* Non-expert mode? Check if the bitrate has been pre-defined */
- if (!bt->tq)
- /* Determine bit-timing parameters */
- err = can_calc_bittiming(dev, bt);
- else
- /* Check bit-timing params and calculate proper brp */
- err = can_fixup_bittiming(dev, bt);
- if (err)
- return err;
- }
+ /*
+ * Depending on the given can_bittiming parameter structure, the CAN
+ * timing parameters are either calculated from the provided bitrate,
+ * OR the timing parameters (tq, prop_seg, etc.) are provided directly
+ * and are then checked and fixed up.
+ */
+ if (!bt->tq && bt->bitrate)
+ err = can_calc_bittiming(dev, bt, btc);
+ else if (bt->tq && !bt->bitrate)
+ err = can_fixup_bittiming(dev, bt, btc);
+ else
+ err = -EINVAL;
- return 0;
+ return err;
}
/*
@@ -317,7 +315,9 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
BUG_ON(idx >= priv->echo_skb_max);
/* check flag whether this packet has to be looped back */
- if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
+ if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+ (skb->protocol != htons(ETH_P_CAN) &&
+ skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
return;
}
@@ -329,7 +329,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
return;
/* make settings for echo to reduce code in irq context */
- skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->dev = dev;
@@ -512,6 +511,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ memset(*cfd, 0, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -572,6 +595,39 @@ void free_candev(struct net_device *dev)
EXPORT_SYMBOL_GPL(free_candev);
/*
+ * changing MTU and control mode for CAN/CANFD devices
+ */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* allow change of MTU according to the CANFD ability of the device */
+ switch (new_mtu) {
+ case CAN_MTU:
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ break;
+
+ case CANFD_MTU:
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+ return -EINVAL;
+
+ priv->ctrlmode |= CAN_CTRLMODE_FD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
+
+/*
* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
@@ -581,11 +637,19 @@ int open_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
+ if (!priv->bittiming.bitrate) {
netdev_err(dev, "bit-timing not yet defined\n");
return -EINVAL;
}
+ /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+ if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+ (!priv->data_bittiming.bitrate ||
+ (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) {
+ netdev_err(dev, "incorrect/missing data bit-timing\n");
+ return -EINVAL;
+ }
+
/* Switch carrier on if device was stopped while in bus-off state */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
@@ -624,6 +688,10 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+ [IFLA_CAN_DATA_BITTIMING]
+ = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_DATA_BITTIMING_CONST]
+ = { .len = sizeof(struct can_bittiming_const) },
};
static int can_changelink(struct net_device *dev,
@@ -642,9 +710,7 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq))
- return -EINVAL;
- err = can_get_bittiming(dev, &bt);
+ err = can_get_bittiming(dev, &bt, priv->bittiming_const);
if (err)
return err;
memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -668,6 +734,12 @@ static int can_changelink(struct net_device *dev,
return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
+
+ /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+ else
+ dev->mtu = CAN_MTU;
}
if (data[IFLA_CAN_RESTART_MS]) {
@@ -686,6 +758,27 @@ static int can_changelink(struct net_device *dev,
return err;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming dbt;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+ sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+ if (err)
+ return err;
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+ if (priv->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = priv->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+ }
+
return 0;
}
@@ -694,7 +787,8 @@ static size_t can_get_size(const struct net_device *dev)
struct can_priv *priv = netdev_priv(dev);
size_t size = 0;
- size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
@@ -703,6 +797,10 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(struct can_bittiming));
+ if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
@@ -716,19 +814,34 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if (nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming) ||
+
+ if ((priv->bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_BITTIMING,
+ sizeof(priv->bittiming), &priv->bittiming)) ||
+
(priv->bittiming_const &&
nla_put(skb, IFLA_CAN_BITTIMING_CONST,
sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
(priv->do_get_berr_counter &&
!priv->do_get_berr_counter(dev, &bec) &&
- nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+ nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+ (priv->data_bittiming.bitrate &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+ sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+ (priv->data_bittiming_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ sizeof(*priv->data_bittiming_const),
+ priv->data_bittiming_const)))
return -EMSGSIZE;
+
return 0;
}
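The MTU handling added above reduces to a simple rule: classic CAN uses an MTU of CAN_MTU (sizeof(struct can_frame), 16 bytes), CAN FD uses CANFD_MTU (sizeof(struct canfd_frame), 72 bytes), and switching to the FD MTU is only valid when the device advertises CAN_CTRLMODE_FD. Below is a minimal standalone sketch of that decision (helper name and the bool flag are made up for illustration; the real can_change_mtu() additionally rejects the change while the interface is up and flips CAN_CTRLMODE_FD in priv->ctrlmode):

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define CAN_MTU         16      /* sizeof(struct can_frame) */
#define CANFD_MTU       72      /* sizeof(struct canfd_frame) */

/* hypothetical stand-in for can_change_mtu(); returns 0 or -EINVAL */
static int check_can_mtu(int new_mtu, bool fd_capable)
{
        switch (new_mtu) {
        case CAN_MTU:
                return 0;               /* always fine, FD mode gets switched off */
        case CANFD_MTU:
                return fd_capable ? 0 : -EINVAL;
        default:
                return -EINVAL;         /* nothing in between is valid */
        }
}

int main(void)
{
        assert(check_can_mtu(CAN_MTU, false) == 0);
        assert(check_can_mtu(CANFD_MTU, false) == -EINVAL);
        assert(check_can_mtu(CANFD_MTU, true) == 0);
        assert(check_can_mtu(1500, true) == -EINVAL);
        return 0;
}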
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 61376abdab39..f425ec2c7839 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1011,6 +1011,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_open = flexcan_open,
.ndo_stop = flexcan_close,
.ndo_start_xmit = flexcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int register_flexcandev(struct net_device *dev)
@@ -1132,9 +1133,9 @@ static int flexcan_probe(struct platform_device *pdev)
of_id = of_match_device(flexcan_of_match, &pdev->dev);
if (of_id) {
devtype_data = of_id->data;
- } else if (pdev->id_entry->driver_data) {
+ } else if (platform_get_device_id(pdev)->driver_data) {
devtype_data = (struct flexcan_devtype_data *)
- pdev->id_entry->driver_data;
+ platform_get_device_id(pdev)->driver_data;
} else {
return -ENODEV;
}
@@ -1201,8 +1202,7 @@ static int flexcan_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int flexcan_suspend(struct device *device)
+static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
@@ -1221,7 +1221,7 @@ static int flexcan_suspend(struct device *device)
return 0;
}
-static int flexcan_resume(struct device *device)
+static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
@@ -1233,7 +1233,6 @@ static int flexcan_resume(struct device *device)
}
return flexcan_chip_enable(priv);
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index ab506d6cab37..3fd9fd942c6e 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1578,6 +1578,7 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_open = grcan_open,
.ndo_stop = grcan_close,
.ndo_start_xmit = grcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int grcan_setup_netdev(struct platform_device *ofdev,
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 71594e5676fd..2382c04dc780 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -198,9 +198,6 @@ struct ican3_dev {
struct net_device *ndev;
struct napi_struct napi;
- /* Device for printing */
- struct device *dev;
-
/* module number */
unsigned int num;
@@ -295,7 +292,7 @@ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
xord = locl ^ peer;
if ((xord & MSYNC_RB_MASK) == 0x00) {
- dev_dbg(mod->dev, "no mbox for reading\n");
+ netdev_dbg(mod->ndev, "no mbox for reading\n");
return -ENOMEM;
}
@@ -340,7 +337,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
xord = locl ^ peer;
if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
- dev_err(mod->dev, "no mbox for writing\n");
+ netdev_err(mod->ndev, "no mbox for writing\n");
return -ENOMEM;
}
@@ -542,7 +539,7 @@ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
memcpy_fromio(&desc, desc_addr, sizeof(desc));
if (!(desc.control & DESC_VALID)) {
- dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+ netdev_dbg(mod->ndev, "%s: no free buffers\n", __func__);
return -ENOMEM;
}
@@ -573,7 +570,7 @@ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
memcpy_fromio(&desc, desc_addr, sizeof(desc));
if (!(desc.control & DESC_VALID)) {
- dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+ netdev_dbg(mod->ndev, "%s: no buffers to recv\n", __func__);
return -ENOMEM;
}
@@ -883,7 +880,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
*/
static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
{
- dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+ netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data);
}
static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
@@ -899,7 +896,7 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
* error frame for userspace
*/
if (msg->spec == MSG_MSGLOST) {
- dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+ netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]);
return;
}
@@ -939,13 +936,13 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* we can only handle the SJA1000 part */
if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
- dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+ netdev_err(mod->ndev, "unable to handle errors on non-SJA1000\n");
return -ENODEV;
}
/* check the message length for sanity */
if (le16_to_cpu(msg->len) < 6) {
- dev_err(mod->dev, "error message too short\n");
+ netdev_err(mod->ndev, "error message too short\n");
return -EINVAL;
}
@@ -967,7 +964,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
*/
if (isrc == CEVTIND_BEI) {
int ret;
- dev_dbg(mod->dev, "bus error interrupt\n");
+ netdev_dbg(mod->ndev, "bus error interrupt\n");
/* TX error */
if (!(ecc & ECC_DIR)) {
@@ -983,7 +980,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
*/
ret = ican3_set_buserror(mod, 1);
if (ret) {
- dev_err(mod->dev, "unable to re-enable bus-error\n");
+ netdev_err(mod->ndev, "unable to re-enable bus-error\n");
return ret;
}
@@ -998,7 +995,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* data overrun interrupt */
if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
- dev_dbg(mod->dev, "data overrun interrupt\n");
+ netdev_dbg(mod->ndev, "data overrun interrupt\n");
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
@@ -1007,7 +1004,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* error warning + passive interrupt */
if (isrc == CEVTIND_EI) {
- dev_dbg(mod->dev, "error warning + passive interrupt\n");
+ netdev_dbg(mod->ndev, "error warning + passive interrupt\n");
if (status & SR_BS) {
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
@@ -1088,7 +1085,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
complete(&mod->termination_comp);
break;
default:
- dev_err(mod->dev, "received an unknown inquiry response\n");
+ netdev_err(mod->ndev, "received an unknown inquiry response\n");
break;
}
}
@@ -1096,7 +1093,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
static void ican3_handle_unknown_message(struct ican3_dev *mod,
struct ican3_msg *msg)
{
- dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
+ netdev_warn(mod->ndev, "received unknown message: spec 0x%.2x length %d\n",
msg->spec, le16_to_cpu(msg->len));
}
@@ -1105,7 +1102,7 @@ static void ican3_handle_unknown_message(struct ican3_dev *mod,
*/
static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
{
- dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+ netdev_dbg(mod->ndev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
mod->num, msg->spec, le16_to_cpu(msg->len));
switch (msg->spec) {
@@ -1406,7 +1403,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
msleep(10);
} while (time_before(jiffies, start + HZ / 4));
- dev_err(mod->dev, "failed to reset CAN module\n");
+ netdev_err(mod->ndev, "failed to reset CAN module\n");
return -ETIMEDOUT;
}
@@ -1425,7 +1422,7 @@ static int ican3_startup_module(struct ican3_dev *mod)
ret = ican3_reset_module(mod);
if (ret) {
- dev_err(mod->dev, "unable to reset module\n");
+ netdev_err(mod->ndev, "unable to reset module\n");
return ret;
}
@@ -1434,41 +1431,41 @@ static int ican3_startup_module(struct ican3_dev *mod)
ret = ican3_msg_connect(mod);
if (ret) {
- dev_err(mod->dev, "unable to connect to module\n");
+ netdev_err(mod->ndev, "unable to connect to module\n");
return ret;
}
ican3_init_new_host_interface(mod);
ret = ican3_msg_newhostif(mod);
if (ret) {
- dev_err(mod->dev, "unable to switch to new-style interface\n");
+ netdev_err(mod->ndev, "unable to switch to new-style interface\n");
return ret;
}
/* default to "termination on" */
ret = ican3_set_termination(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to enable termination\n");
+ netdev_err(mod->ndev, "unable to enable termination\n");
return ret;
}
/* default to "bus errors enabled" */
ret = ican3_set_buserror(mod, 1);
if (ret) {
- dev_err(mod->dev, "unable to set bus-error\n");
+ netdev_err(mod->ndev, "unable to set bus-error\n");
return ret;
}
ican3_init_fast_host_interface(mod);
ret = ican3_msg_fasthostif(mod);
if (ret) {
- dev_err(mod->dev, "unable to switch to fast host interface\n");
+ netdev_err(mod->ndev, "unable to switch to fast host interface\n");
return ret;
}
ret = ican3_set_id_filter(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set acceptance filter\n");
+ netdev_err(mod->ndev, "unable to set acceptance filter\n");
return ret;
}
@@ -1487,14 +1484,14 @@ static int ican3_open(struct net_device *ndev)
/* open the CAN layer */
ret = open_candev(ndev);
if (ret) {
- dev_err(mod->dev, "unable to start CAN layer\n");
+ netdev_err(mod->ndev, "unable to start CAN layer\n");
return ret;
}
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set bus-on\n");
+ netdev_err(mod->ndev, "unable to set bus-on\n");
close_candev(ndev);
return ret;
}
@@ -1518,7 +1515,7 @@ static int ican3_stop(struct net_device *ndev)
/* bring the bus offline, stop receiving packets */
ret = ican3_set_bus_state(mod, false);
if (ret) {
- dev_err(mod->dev, "unable to set bus-off\n");
+ netdev_err(mod->ndev, "unable to set bus-off\n");
return ret;
}
@@ -1545,7 +1542,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
/* check that we can actually transmit */
if (!ican3_txok(mod)) {
- dev_err(mod->dev, "BUG: no free descriptors\n");
+ netdev_err(mod->ndev, "BUG: no free descriptors\n");
spin_unlock_irqrestore(&mod->lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1597,6 +1594,7 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_open = ican3_open,
.ndo_stop = ican3_stop,
.ndo_start_xmit = ican3_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/*
@@ -1657,7 +1655,7 @@ static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
- dev_err(mod->dev, "unable to set bus-on\n");
+ netdev_err(ndev, "unable to set bus-on\n");
return ret;
}
@@ -1682,7 +1680,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
if (ret == 0) {
- dev_info(mod->dev, "%s timed out\n", __func__);
+ netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1708,7 +1706,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
if (ret == 0) {
- dev_info(mod->dev, "%s timed out\n", __func__);
+ netdev_info(mod->ndev, "%s timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -1778,7 +1776,6 @@ static int ican3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
mod = netdev_priv(ndev);
mod->ndev = ndev;
- mod->dev = &pdev->dev;
mod->num = pdata->modno;
netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index cdb9808d12db..28c11f815245 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -601,10 +601,10 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
(bt->prop_seg - 1));
mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
(bt->phase_seg2 - 1));
- dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
- mcp251x_read_reg(spi, CNF1),
- mcp251x_read_reg(spi, CNF2),
- mcp251x_read_reg(spi, CNF3));
+ dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+ mcp251x_read_reg(spi, CNF1),
+ mcp251x_read_reg(spi, CNF2),
+ mcp251x_read_reg(spi, CNF3));
return 0;
}
@@ -672,7 +672,7 @@ static int mcp251x_hw_probe(struct spi_device *spi)
static int mcp251x_power_enable(struct regulator *reg, int enable)
{
- if (IS_ERR(reg))
+ if (IS_ERR_OR_NULL(reg))
return 0;
if (enable)
@@ -996,6 +996,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_open = mcp251x_open,
.ndo_stop = mcp251x_stop,
.ndo_start_xmit = mcp251x_hard_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct of_device_id mcp251x_of_match[] = {
@@ -1155,8 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- dev_info(&spi->dev, "probed\n");
-
return ret;
error_probe:
@@ -1197,9 +1196,7 @@ static int mcp251x_can_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-static int mcp251x_can_suspend(struct device *dev)
+static int __maybe_unused mcp251x_can_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1221,7 +1218,7 @@ static int mcp251x_can_suspend(struct device *dev)
priv->after_suspend = AFTER_SUSPEND_DOWN;
}
- if (!IS_ERR(priv->power)) {
+ if (!IS_ERR_OR_NULL(priv->power)) {
regulator_disable(priv->power);
priv->after_suspend |= AFTER_SUSPEND_POWER;
}
@@ -1229,7 +1226,7 @@ static int mcp251x_can_suspend(struct device *dev)
return 0;
}
-static int mcp251x_can_resume(struct device *dev)
+static int __maybe_unused mcp251x_can_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1249,7 +1246,6 @@ static int mcp251x_can_resume(struct device *dev)
enable_irq(spi->irq);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
mcp251x_can_resume);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index b9f3faabb0f3..e0c9be5e2ab7 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -647,9 +647,10 @@ static int mscan_close(struct net_device *dev)
}
static const struct net_device_ops mscan_netdev_ops = {
- .ndo_open = mscan_open,
- .ndo_stop = mscan_close,
- .ndo_start_xmit = mscan_start_xmit,
+ .ndo_open = mscan_open,
+ .ndo_stop = mscan_close,
+ .ndo_start_xmit = mscan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_mscandev(struct net_device *dev, int mscan_clksrc)
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 6c077eb87b5e..6472562efedc 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -950,6 +950,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
.ndo_open = pch_can_open,
.ndo_stop = pch_close,
.ndo_start_xmit = pch_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static void pch_can_remove(struct pci_dev *pdev)
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ff2ba86cd4a4..4b18b8765523 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
the "platform bus" (the Linux abstraction for devices attached
directly to the processor). Such chips can be found on various
boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
- tristate "Generic OF Platform Bus based SJA1000 driver"
- depends on OF
- ---help---
- This driver adds support for the SJA1000 chips connected to
- the OpenFirmware "platform bus" found on embedded systems with
- OpenFirmware bindings, e.g. if you have a PowerPC based system
- you may want to enable this option.
+ PCM038. It also supports the OpenFirmware "platform bus" found
+ on embedded systems with OpenFirmware bindings, e.g. if you
+ have a PowerPC based system you may want to enable this option.
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index b3d05cbfec36..531d5fcc97e5 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_CAN_SJA1000) += sja1000.o
obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index d790b874ca79..fd13dbf07d9c 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -323,6 +323,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->cdr = EMS_PCI_CDR;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
if (card->version == 1)
/* reset int flag of pita */
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 9e535f2ef52b..381de998d2f1 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -211,6 +211,7 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
priv = netdev_priv(dev);
priv->priv = card;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index c96eb14699d5..23b8e1324e25 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -270,6 +270,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
priv->reg_base, board->conf_addr, dev->irq);
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = channel;
/* Register SJA1000 device */
err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 065ca49eb45e..c540e3d12e3d 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -642,6 +642,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
icr |= chan->icr_mask;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
/* Create chain of SJA1000 devices */
chan->prev_dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index f7ad754dd2aa..dd56133cc461 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -550,6 +550,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
priv = netdev_priv(netdev);
priv->priv = card;
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->dev_id = i;
priv->irq_flags = IRQF_SHARED;
netdev->irq = pdev->irq;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index fbb61a0d901f..ec39b7cb2287 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -587,6 +587,7 @@ static int plx_pci_add_card(struct pci_dev *pdev,
priv->cdr = ci->cdr;
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
/* Register SJA1000 device */
err = register_sja1000dev(dev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f17c3018b7c7..f31499a32d7d 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
struct sja1000_priv *priv = netdev_priv(dev);
if (priv->reg_base && sja1000_is_absent(priv)) {
- printk(KERN_INFO "%s: probing @0x%lX failed\n",
- DRV_NAME, dev->base_addr);
+ netdev_err(dev, "probing failed\n");
return 0;
}
return -1;
@@ -643,9 +642,10 @@ void free_sja1000dev(struct net_device *dev)
EXPORT_SYMBOL_GPL(free_sja1000dev);
static const struct net_device_ops sja1000_netdev_ops = {
- .ndo_open = sja1000_open,
- .ndo_stop = sja1000_close,
- .ndo_start_xmit = sja1000_start_xmit,
+ .ndo_open = sja1000_open,
+ .ndo_stop = sja1000_close,
+ .ndo_start_xmit = sja1000_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
int register_sja1000dev(struct net_device *dev)
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644
index 2f6e24534231..000000000000
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
- *
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
- * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
- * definition in your flattened device tree source (DTS) file similar to:
- *
- * can@3,100 {
- * compatible = "nxp,sja1000";
- * reg = <3 0x100 0x80>;
- * interrupts = <2 0>;
- * interrupt-parent = <&mpic>;
- * nxp,external-clock-frequency = <16000000>;
- * };
- *
- * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
- * information.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/can/dev.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include "sja1000.h"
-
-#define DRV_NAME "sja1000_of_platform"
-
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
-MODULE_LICENSE("GPL v2");
-
-#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
-
-#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
-#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
-
-static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
-{
- return ioread8(priv->reg_base + reg);
-}
-
-static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
- int reg, u8 val)
-{
- iowrite8(val, priv->reg_base + reg);
-}
-
-static int sja1000_ofp_remove(struct platform_device *ofdev)
-{
- struct net_device *dev = platform_get_drvdata(ofdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- struct resource res;
-
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
- iounmap(priv->reg_base);
- irq_dispose_mapping(dev->irq);
-
- of_address_to_resource(np, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-
- return 0;
-}
-
-static int sja1000_ofp_probe(struct platform_device *ofdev)
-{
- struct device_node *np = ofdev->dev.of_node;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource res;
- u32 prop;
- int err, irq, res_size;
- void __iomem *base;
-
- err = of_address_to_resource(np, 0, &res);
- if (err) {
- dev_err(&ofdev->dev, "invalid address\n");
- return err;
- }
-
- res_size = resource_size(&res);
-
- if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
- return -EBUSY;
- }
-
- base = ioremap_nocache(res.start, res_size);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
- err = -ENOMEM;
- goto exit_release_mem;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq == 0) {
- dev_err(&ofdev->dev, "no irq found\n");
- err = -ENODEV;
- goto exit_unmap_mem;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_dispose_irq;
- }
-
- priv = netdev_priv(dev);
-
- priv->read_reg = sja1000_ofp_read_reg;
- priv->write_reg = sja1000_ofp_write_reg;
-
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
- else
- priv->ocr |= OCR_MODE_NORMAL; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
- else
- priv->ocr |= OCR_TX0_PULLDOWN; /* default */
-
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
-
- if (divider > 1)
- priv->cdr |= divider / 2 - 1;
- else
- priv->cdr |= CDR_CLKOUT_MASK;
- } else {
- priv->cdr |= CDR_CLK_OFF; /* default */
- }
-
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
- priv->cdr |= CDR_CBP; /* default */
-
- priv->irq_flags = IRQF_SHARED;
- priv->reg_base = base;
-
- dev->irq = irq;
-
- dev_info(&ofdev->dev,
- "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
- priv->reg_base, dev->irq, priv->can.clock.freq,
- priv->ocr, priv->cdr);
-
- platform_set_drvdata(ofdev, dev);
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- err = register_sja1000dev(dev);
- if (err) {
- dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
- DRV_NAME, err);
- goto exit_free_sja1000;
- }
-
- return 0;
-
-exit_free_sja1000:
- free_sja1000dev(dev);
-exit_dispose_irq:
- irq_dispose_mapping(irq);
-exit_unmap_mem:
- iounmap(base);
-exit_release_mem:
- release_mem_region(res.start, res_size);
-
- return err;
-}
-
-static struct of_device_id sja1000_ofp_table[] = {
- {.compatible = "nxp,sja1000"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-
-static struct platform_driver sja1000_ofp_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .of_match_table = sja1000_ofp_table,
- },
- .probe = sja1000_ofp_probe,
- .remove = sja1000_ofp_remove,
-};
-
-module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 943df645b459..95a844a7ee7b 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -26,12 +26,16 @@
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include "sja1000.h"
#define DRV_NAME "sja1000_platform"
+#define SP_CAN_CLOCK (16000000 / 2)
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
iowrite8(val, priv->reg_base + reg * 4);
}
-static int sp_probe(struct platform_device *pdev)
+static void sp_populate(struct sja1000_priv *priv,
+ struct sja1000_platform_data *pdata,
+ unsigned long resource_mem_flags)
{
- int err;
- void __iomem *addr;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource *res_mem, *res_irq;
- struct sja1000_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- err = -ENODEV;
- goto exit;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_mem || !res_irq) {
- err = -ENODEV;
- goto exit;
- }
-
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- DRV_NAME)) {
- err = -EBUSY;
- goto exit;
- }
-
- addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
- if (!addr) {
- err = -ENOMEM;
- goto exit_release;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_iounmap;
- }
- priv = netdev_priv(dev);
-
- dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irq_flags |= IRQF_SHARED;
- priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
- switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
priv->read_reg = sp_read_reg32;
priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
priv->write_reg = sp_write_reg8;
break;
}
+}
+
+static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
+{
+ int err;
+ u32 prop;
+
+ err = of_property_read_u32(of, "reg-io-width", &prop);
+ if (err)
+ prop = 1; /* 8 bit is default */
+
+ switch (prop) {
+ case 4:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case 2:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case 1: /* fallthrough */
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ }
+
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
+ if (!err)
+ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
+ if (!err)
+ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+ err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
+ if (!err && prop) {
+ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+ else
+ priv->cdr |= CDR_CLKOUT_MASK;
+ } else {
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+ if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+ int err, irq = 0;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource *res_mem, *res_irq = NULL;
+ struct sja1000_platform_data *pdata;
+ struct device_node *of = pdev->dev.of_node;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata && !of) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), DRV_NAME))
+ return -EBUSY;
+
+ addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+ if (!addr)
+ return -ENOMEM;
+
+ if (of)
+ irq = irq_of_parse_and_map(of, 0);
+ else
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!irq && !res_irq)
+ return -ENODEV;
+
+ dev = alloc_sja1000dev(0);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ if (res_irq) {
+ irq = res_irq->start;
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
+ } else {
+ priv->irq_flags = IRQF_SHARED;
+ }
+
+ dev->irq = irq;
+ priv->reg_base = addr;
+
+ if (of)
+ sp_populate_of(priv, of);
+ else
+ sp_populate(priv, pdata, res_mem->flags);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
exit_free:
free_sja1000dev(dev);
- exit_iounmap:
- iounmap(addr);
- exit_release:
- release_mem_region(res_mem->start, resource_size(res_mem));
- exit:
return err;
}
static int sp_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct resource *res;
unregister_sja1000dev(dev);
-
- if (priv->reg_base)
- iounmap(priv->reg_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
free_sja1000dev(dev);
return 0;
}
+static struct of_device_id sp_of_table[] = {
+ {.compatible = "nxp,sja1000"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
static struct platform_driver sp_driver = {
.probe = sp_probe,
.remove = sp_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = sp_of_table,
},
};
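The nxp,clock-out-frequency handling in sp_populate_of() above is easier to see with numbers: priv->can.clock.freq is half the oscillator frequency, so freq * 2 / clkout gives the oscillator divider, and the CDR bits become divider / 2 - 1, with the all-ones field used when no division is needed. A standalone sketch (helper name made up; CDR_CLKOUT_MASK assumed to be the 3-bit divider field, 0x07, as in sja1000.h):

#include <assert.h>
#include <stdint.h>

#define CDR_CLKOUT_MASK 0x07    /* assumed 3-bit clock divider field */

/* model of the nxp,clock-out-frequency handling in sp_populate_of();
 * can_clock is the CAN core clock (half the oscillator frequency)
 */
static uint32_t clkout_cdr_bits(uint32_t can_clock, uint32_t clkout)
{
        uint32_t divider = can_clock * 2 / clkout;      /* osc / clkout */

        return divider > 1 ? divider / 2 - 1 : CDR_CLKOUT_MASK;
}

int main(void)
{
        /* 16 MHz oscillator -> 8 MHz CAN clock */
        assert(clkout_cdr_bits(8000000, 8000000) == 0);  /* osc / 2 */
        assert(clkout_cdr_bits(8000000, 4000000) == 1);  /* osc / 4 */
        assert(clkout_cdr_bits(8000000, 16000000) == CDR_CLKOUT_MASK);
        return 0;
}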
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 3fcdae266377..f5b16e0e3a12 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -411,10 +411,16 @@ static void slc_free_netdev(struct net_device *dev)
slcan_devs[i] = NULL;
}
+static int slcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return -EINVAL;
+}
+
static const struct net_device_ops slc_netdev_ops = {
.ndo_open = slc_open,
.ndo_stop = slc_close,
.ndo_start_xmit = slc_xmit,
+ .ndo_change_mtu = slcan_change_mtu,
};
static void slc_setup(struct net_device *dev)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 9ea0dcde94ce..7d8c8f3672dd 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -628,6 +628,7 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_open = softing_netdev_open,
.ndo_stop = softing_netdev_stop,
.ndo_start_xmit = softing_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const softing_btr_const = {
@@ -832,6 +833,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto netdev_failed;
}
+ netdev->dev_id = j;
priv = netdev_priv(card->net[j]);
priv->index = j;
ret = softing_netdev_register(netdev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2c62fe6c8fa9..258b9c4856ec 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -871,6 +871,7 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_open = ti_hecc_open,
.ndo_stop = ti_hecc_close,
.ndo_start_xmit = ti_hecc_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static int ti_hecc_probe(struct platform_device *pdev)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 52c42fd49510..00f2534dde73 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -883,6 +883,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_open = ems_usb_open,
.ndo_stop = ems_usb_close,
.ndo_start_xmit = ems_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const ems_usb_bittiming_const = {
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 7fbe85935f1d..b7c9e8b11460 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -888,6 +888,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
.ndo_open = esd_usb2_open,
.ndo_stop = esd_usb2_close,
.ndo_start_xmit = esd_usb2_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const esd_usb2_bittiming_const = {
@@ -1024,6 +1025,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
netdev->netdev_ops = &esd_usb2_netdev_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = index;
err = register_candev(netdev);
if (err) {
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index e77d11049747..4ca46edc061d 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1388,6 +1388,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
.ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const kvaser_usb_bittiming_const = {
@@ -1529,6 +1530,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
netdev->netdev_ops = &kvaser_usb_netdev_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = channel;
dev->nets[channel] = priv;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b7a4c3b01a2..644e6ab8a489 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -702,6 +702,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
/*
@@ -769,6 +770,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = ctrl_idx;
err = register_candev(netdev);
if (err) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index a0fa1fd5092b..ef674ecb82f8 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -697,8 +697,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
nofreecontext:
- usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+ usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context");
@@ -887,6 +887,7 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_open = usb_8dev_open,
.ndo_stop = usb_8dev_close,
.ndo_start_xmit = usb_8dev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct can_bittiming_const usb_8dev_bittiming_const = {
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bd8f84b0b894..0932ffbf381b 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -63,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
- start = u64_stats_fetch_begin_bh(&dstats->syncp);
+ start = u64_stats_fetch_begin_irq(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpackets;
}
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- int i;
- dev->dstats = alloc_percpu(struct pcpu_dstats);
+ dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_dstats *dstats;
- dstats = per_cpu_ptr(dev->dstats, i);
- u64_stats_init(&dstats->syncp);
- }
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index c53384d41c96..35df0b9e6848 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb (skb);
+ dev_consume_skb_any (skb);
/* Clear the Tx status stack. */
{
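dev_consume_skb_any() differs from dev_kfree_skb() in two ways that matter on a transmit path: it is safe in any context, including hard interrupt context (the free is deferred when necessary), and it records the skb as consumed rather than dropped, so drop-monitoring tools do not count every successful transmit as a packet loss. The rule of thumb for an ndo_start_xmit handler, as a sketch:

	if (tx_ok)
		dev_consume_skb_any(skb);	/* successful TX, not a drop */
	else
		dev_kfree_skb_any(skb);		/* drop path, still IRQ-safe */
	return NETDEV_TX_OK;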
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860a39c9..063557e037f2 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
-/*======================================================================
-
- A PCMCIA ethernet driver for the 3com 3c589 card.
-
- Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
- 3c589_cs.c 1.162 2001/10/13 00:08:50
-
- The network driver code is based on Donald Becker's 3c589 code:
-
- Written 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
- Donald Becker may be reached at becker@scyld.com
-
- Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,18 +42,20 @@
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
/* To minimize the size of the driver source I only define operating
- constants if they are used several times. You'll need the manual
- if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
/* The top five bits written to EL3_CMD are a command, the lower
- 11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
enum c509cmd {
TotalReset = 0<<11,
SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
static int tc589_probe(struct pcmcia_device *link)
{
- struct el3_private *lp;
- struct net_device *dev;
+ struct el3_private *lp;
+ struct net_device *dev;
- dev_dbg(&link->dev, "3c589_attach()\n");
+ dev_dbg(&link->dev, "3c589_attach()\n");
- /* Create new ethernet device */
- dev = alloc_etherdev(sizeof(struct el3_private));
- if (!dev)
- return -ENOMEM;
- lp = netdev_priv(dev);
- link->priv = dev;
- lp->p_dev = link;
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
- spin_lock_init(&lp->lock);
- link->resource[0]->end = 16;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ spin_lock_init(&lp->lock);
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
- link->config_flags |= CONF_ENABLE_IRQ;
- link->config_index = 1;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
- dev->netdev_ops = &el3_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- return tc589_config(link);
+ return tc589_config(link);
}
static void tc589_detach(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
+ struct net_device *dev = link->priv;
- dev_dbg(&link->dev, "3c589_detach\n");
+ dev_dbg(&link->dev, "3c589_detach\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- tc589_release(link);
+ tc589_release(link);
- free_netdev(dev);
+ free_netdev(dev);
} /* tc589_detach */
static int tc589_config(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- __be16 *phys_addr;
- int ret, i, j, multi = 0, fifo;
- unsigned int ioaddr;
- static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
- u8 *buf;
- size_t len;
-
- dev_dbg(&link->dev, "3c589_config\n");
-
- phys_addr = (__be16 *)dev->dev_addr;
- /* Is this a 3c562? */
- if (link->manf_id != MANFID_3COM)
- dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
- multi = (link->card_id == PRODID_3COM_3C562);
-
- link->io_lines = 16;
-
- /* For the 3c562, the base address must be xx00-xx7f */
- for (i = j = 0; j < 0x400; j += 0x10) {
- if (multi && (j & 0x80)) continue;
- link->resource[0]->start = j ^ 0x300;
- i = pcmcia_request_io(link);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- ret = pcmcia_request_irq(link, el3_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- ioaddr = dev->base_addr;
- EL3WINDOW(0);
-
- /* The 3c589 has an extra EEPROM for configuration info, including
- the hardware address. The 3c562 puts the address in the CIS. */
- len = pcmcia_get_tuple(link, 0x88, &buf);
- if (buf && len >= 6) {
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
- kfree(buf);
- } else {
- kfree(buf); /* 0 < len < 6 */
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
- dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
- dev->base_addr, dev->base_addr+15);
- goto failed;
+ struct net_device *dev = link->priv;
+ __be16 *phys_addr;
+ int ret, i, j, multi = 0, fifo;
+ unsigned int ioaddr;
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ u8 *buf;
+ size_t len;
+
+ dev_dbg(&link->dev, "3c589_config\n");
+
+ phys_addr = (__be16 *)dev->dev_addr;
+ /* Is this a 3c562? */
+ if (link->manf_id != MANFID_3COM)
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+ multi = (link->card_id == PRODID_3COM_3C562);
+
+ link->io_lines = 16;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80))
+ continue;
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
}
- }
-
- /* The address and resource configuration register aren't loaded from
- the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
- outw(0x3f00, ioaddr + 8);
- fifo = inl(ioaddr);
-
- /* The if_port symbol can be set when the module is loaded */
- if ((if_port >= 0) && (if_port <= 3))
- dev->if_port = if_port;
- else
- dev_err(&link->dev, "invalid if_port requested\n");
-
- SET_NETDEV_DEV(dev, &link->dev);
-
- if (register_netdev(dev) != 0) {
- dev_err(&link->dev, "register_netdev() failed\n");
- goto failed;
- }
-
- netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
- (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
- return 0;
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ * the hardware address. The 3c562 puts the address in the CIS.
+ */
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == htons(0x6060)) {
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+ */
+
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ dev_err(&link->dev, "invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return 0;
failed:
- tc589_release(link);
- return -ENODEV;
+ tc589_release(link);
+ return -ENODEV;
} /* tc589_config */
static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
/*====================================================================*/
-/*
- Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
static void tc589_wait_for_completion(struct net_device *dev, int cmd)
{
- int i = 100;
- outw(cmd, dev->base_addr + EL3_CMD);
- while (--i > 0)
- if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
- if (i == 0)
- netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+ break;
+ if (i == 0)
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
-/*
- Read a word from the EEPROM using the regular EEPROM access register.
- Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
static u16 read_eeprom(unsigned int ioaddr, int index)
{
- int i;
- outw(EEPROM_READ + index, ioaddr + 10);
- /* Reading the eeprom takes 162 us */
- for (i = 1620; i >= 0; i--)
- if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
- break;
- return inw(ioaddr + 12);
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
}
-/*
- Set transceiver type, perhaps to something other than what the user
- specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
-
- EL3WINDOW(0);
- switch (if_port) {
- case 0: case 1: outw(0, ioaddr + 6); break;
- case 2: outw(3<<14, ioaddr + 6); break;
- case 3: outw(1<<14, ioaddr + 6); break;
- }
- /* On PCMCIA, this just turns on the LED */
- outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
- /* 10baseT interface, enable link beat and jabber check. */
- EL3WINDOW(4);
- outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- if (if_port == 2)
- lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
- else
- lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0:
+ case 1:
+ outw(0, ioaddr + 6);
+ break;
+ case 2:
+ outw(3<<14, ioaddr + 6);
+ break;
+ case 3:
+ outw(1<<14, ioaddr + 6);
+ break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
}
static void dump_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- EL3WINDOW(1);
- netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
- inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
- inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
- EL3WINDOW(4);
- netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
- inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
- inw(ioaddr+0x0a));
- EL3WINDOW(1);
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
+ EL3WINDOW(1);
}
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
- outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
- /* Set the station address in window 2. */
- EL3WINDOW(2);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
-
- tc589_set_xcvr(dev, dev->if_port);
-
- /* Switch to the stats window, and clear all stats by reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- EL3WINDOW(6);
- for (i = 0; i < 9; i++)
- inb(ioaddr+i);
- inw(ioaddr + 10);
- inw(ioaddr + 12);
-
- /* Switch to register set 1 for normal use. */
- EL3WINDOW(1);
-
- set_rx_mode(dev);
- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
- outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
- outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
- /* Allow status bits to be seen. */
- outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
- /* Ack all pending events, and set active indicator mask. */
- outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
- outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
| AdapterFailure, ioaddr + EL3_CMD);
}
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_config(struct net_device *dev, struct ifmap *map)
{
- if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
- if (map->port <= 3) {
- dev->if_port = map->port;
- netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
- tc589_set_xcvr(dev, dev->if_port);
- } else
- return -EINVAL;
- }
- return 0;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int el3_open(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
- if (!pcmcia_dev_present(link))
- return -ENODEV;
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
- link->open++;
- netif_start_queue(dev);
+ link->open++;
+ netif_start_queue(dev);
- tc589_reset(dev);
- init_timer(&lp->media);
- lp->media.function = media_check;
- lp->media.data = (unsigned long) dev;
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
- dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
- return 0;
+ return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_warn(dev, "Transmit timed out!\n");
- dump_status(dev);
- dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
- /* Issue TX_RESET and TX_START commands. */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_warn(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
static void pop_tx_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- /* Clear the Tx status stack. */
- for (i = 32; i > 0; i--) {
- u_char tx_status = inb(ioaddr + TX_STATUS);
- if (!(tx_status & 0x84)) break;
- /* reset transmitter on jabber error or underrun */
- if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
- if (tx_status & 0x38) {
- netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
- outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
- }
}
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- struct el3_private *priv = netdev_priv(dev);
- unsigned long flags;
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
- netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
(long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
- /* Put out the doubleword header... */
- outw(skb->len, ioaddr + TX_FIFO);
- outw(0x00, ioaddr + TX_FIFO);
- /* ... and the packet rounded to a doubleword. */
- outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- if (inw(ioaddr + TX_FREE) <= 1536) {
- netif_stop_queue(dev);
- /* Interrupt us when the FIFO has room for max-sized packet. */
- outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
- }
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
- pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_kfree_skb(skb);
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* The EL3 interrupt handler. */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr;
- __u16 status;
- int i = 0, handled = 1;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
- if (!netif_device_present(dev))
- return IRQ_NONE;
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
- ioaddr = dev->base_addr;
+ ioaddr = dev->base_addr;
- netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
- while ((status = inw(ioaddr + EL3_STATUS)) &
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
- if ((status & 0xe000) != 0x2000) {
- netdev_dbg(dev, "interrupt from dead card\n");
- handled = 0;
- break;
- }
- if (status & RxComplete)
- el3_rx(dev);
- if (status & TxAvailable) {
- netdev_dbg(dev, " TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
- }
- if (status & TxComplete)
- pop_tx_status(dev);
- if (status & (AdapterFailure | RxEarly | StatsFull)) {
- /* Handle all uncommon interrupts. */
- if (status & StatsFull) /* Empty statistics. */
- update_stats(dev);
- if (status & RxEarly) { /* Rx early is unused. */
- el3_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
- if (status & AdapterFailure) {
- u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = inw(ioaddr + 4);
- EL3WINDOW(1);
- netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ if ((status & 0xe000) != 0x2000) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
+ }
+ if (status & RxComplete)
+ el3_rx(dev);
+ if (status & TxAvailable) {
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & TxComplete)
+ pop_tx_status(dev);
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) {
+ /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
fifo_diag);
- if (fifo_diag & 0x0400) {
- /* Tx overrun */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
}
- if (fifo_diag & 0x2000) {
- /* Rx underrun */
- tc589_wait_for_completion(dev, RxReset);
- set_rx_mode(dev);
- outw(RxEnable, ioaddr + EL3_CMD);
+ if (++i > 10) {
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
- outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
- }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
- if (++i > 10) {
- netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
- status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
- }
- /* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
- }
- lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
- inw(ioaddr + EL3_STATUS));
- return IRQ_RETVAL(handled);
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
}
static void media_check(unsigned long arg)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
- u16 media, errs;
- unsigned long flags;
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
- if (!netif_device_present(dev)) goto reschedule;
+ if (!netif_device_present(dev))
+ goto reschedule;
- /* Check for pending interrupt with expired latency timer: with
- this, we can limp along even if the interrupt is blocked */
- if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ /* Check for pending interrupt with expired latency timer: with
+ * this, we can limp along even if the interrupt is blocked
+ */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
- if (!lp->fast_poll)
- netdev_warn(dev, "interrupt(s) dropped!\n");
-
- local_irq_save(flags);
- el3_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-
- lp->fast_poll = HZ;
- }
- if (lp->fast_poll) {
- lp->fast_poll--;
- lp->media.expires = jiffies + HZ/100;
- add_timer(&lp->media);
- return;
- }
-
- /* lp->lock guards the EL3 window. Window should always be 1 except
- when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
- EL3WINDOW(4);
- media = inw(ioaddr+WN4_MEDIA) & 0xc810;
-
- /* Ignore collisions unless we've had no irq's recently */
- if (time_before(jiffies, lp->last_irq + HZ)) {
- media &= ~0x0010;
- } else {
- /* Try harder to detect carrier errors */
- EL3WINDOW(6);
- outw(StatsDisable, ioaddr + EL3_CMD);
- errs = inb(ioaddr + 0);
- outw(StatsEnable, ioaddr + EL3_CMD);
- dev->stats.tx_carrier_errors += errs;
- if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
- }
+ if (!lp->fast_poll)
+ netdev_warn(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ * when the lock is held
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (time_before(jiffies, lp->last_irq + HZ)) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010))
+ media |= 0x0010;
+ }
- if (media != lp->media_status) {
- if ((media & lp->media_status & 0x8000) &&
- ((lp->media_status ^ media) & 0x0800))
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
netdev_info(dev, "%s link beat\n",
- (lp->media_status & 0x0800 ? "lost" : "found"));
- else if ((media & lp->media_status & 0x4000) &&
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
netdev_info(dev, "coax cable %s\n",
- (lp->media_status & 0x0010 ? "ok" : "problem"));
- if (dev->if_port == 0) {
- if (media & 0x8000) {
- if (media & 0x0800)
- netdev_info(dev, "flipped to 10baseT\n");
- else
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
tc589_set_xcvr(dev, 2);
- } else if (media & 0x4000) {
- if (media & 0x0010)
- tc589_set_xcvr(dev, 1);
- else
- netdev_info(dev, "flipped to 10base2\n");
- }
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ lp->media_status = media;
}
- lp->media_status = media;
- }
- EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
}
static struct net_device_stats *el3_get_stats(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct pcmcia_device *link = lp->p_dev;
- if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
- update_stats(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return &dev->stats;
+ if (pcmcia_dev_present(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &dev->stats;
}
-/*
- Update statistics. We change to register window 6, so this should be run
- single-threaded if the device is active. This is expected to be a rare
- operation, and it's simpler for the rest of the driver to assume that
- window 1 is always valid rather than use a special window-state variable.
-
- Caller must hold the lock for this
+/* Update statistics. We change to register window 6, so this should be run
+* single-threaded if the device is active. This is expected to be a rare
+* operation, and it's simpler for the rest of the driver to assume that
+* window 1 is always valid rather than use a special window-state variable.
+*
+* Caller must hold the lock for this
*/
+
static void update_stats(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_dbg(dev, "updating the statistics.\n");
- /* Turn off statistics updates while reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- /* Switch to the stats window, and read everything. */
- EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
- dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
- dev->stats.collisions += inb(ioaddr + 3);
- dev->stats.tx_window_errors += inb(ioaddr + 4);
- dev->stats.rx_fifo_errors += inb(ioaddr + 5);
- dev->stats.tx_packets += inb(ioaddr + 6);
- /* Rx packets */ inb(ioaddr + 7);
- /* Tx deferrals */ inb(ioaddr + 8);
- /* Rx octets */ inw(ioaddr + 10);
- /* Tx octets */ inw(ioaddr + 12);
-
- /* Back to window 1, and turn statistics back on. */
- EL3WINDOW(1);
- outw(StatsEnable, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */
+ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */
+ inb(ioaddr + 7);
+ /* Tx deferrals */
+ inb(ioaddr + 8);
+ /* Rx octets */
+ inw(ioaddr + 10);
+ /* Tx octets */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
}
static int el3_rx(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int worklimit = 32;
- short rx_status;
+ unsigned int ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
- netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
- while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
- worklimit--;
- if (rx_status & 0x4000) { /* Error, update stats. */
- short error = rx_status & 0x3800;
- dev->stats.rx_errors++;
- switch (error) {
- case 0x0000: dev->stats.rx_over_errors++; break;
- case 0x0800: dev->stats.rx_length_errors++; break;
- case 0x1000: dev->stats.rx_frame_errors++; break;
- case 0x1800: dev->stats.rx_length_errors++; break;
- case 0x2000: dev->stats.rx_frame_errors++; break;
- case 0x2800: dev->stats.rx_crc_errors++; break;
- }
- } else {
- short pkt_len = rx_status & 0x7ff;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
-
- netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000:
+ dev->stats.rx_over_errors++;
+ break;
+ case 0x0800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x1000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x1800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x2000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x2800:
+ dev->stats.rx_crc_errors++;
+ break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
(pkt_len+3)>>2);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
pkt_len);
- dev->stats.rx_dropped++;
- }
+ dev->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
}
- /* Pop the top of the Rx FIFO */
- tc589_wait_for_completion(dev, RxDiscard);
- }
- if (worklimit == 0)
- netdev_warn(dev, "too much work in el3_rx!\n");
- return 0;
+ if (worklimit == 0)
+ netdev_warn(dev, "too much work in el3_rx!\n");
+ return 0;
}
static void set_rx_mode(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- u16 opts = SetRxFilter | RxStation | RxBroadcast;
-
- if (dev->flags & IFF_PROMISC)
- opts |= RxMulticast | RxProm;
- else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
- opts |= RxMulticast;
- outw(opts, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
}
static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
- unsigned int ioaddr = dev->base_addr;
-
- dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
- if (pcmcia_dev_present(link)) {
- /* Turn off statistics ASAP. We update dev->stats below. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
- /* Disable the receiver and transmitter. */
- outw(RxDisable, ioaddr + EL3_CMD);
- outw(TxDisable, ioaddr + EL3_CMD);
-
- if (dev->if_port == 2)
- /* Turn off thinnet power. Green! */
- outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == 1) {
- /* Disable link beat and jabber */
- EL3WINDOW(4);
- outw(0, ioaddr + WN4_MEDIA);
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
}
- /* Switching back to window 0 disables the IRQ. */
- EL3WINDOW(0);
- /* But we explicitly zero the IRQ line select anyway. */
- outw(0x0f00, ioaddr + WN0_IRQ);
-
- /* Check if the card still exists */
- if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
- update_stats(dev);
- }
-
- link->open--;
- netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
- return 0;
+ return 0;
}
static const struct pcmcia_device_id tc589_ids[] = {
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 238ccea965c8..61477b8e8d24 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
skb_tx_timestamp(skb);
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev_kfree_skb (skb);
+ dev_consume_skb_any (skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index d2cd80444ade..599311f0e05c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
skb_tx_timestamp(skb);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
dev->stats.tx_bytes += send_length;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 506b0248c400..39b26fe28d10 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/altera/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
@@ -149,6 +150,7 @@ config S6GMAC
To compile this driver as a module, choose M here. The module
will be called s6gmac.
+source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c0b8789952e7..545d0b3b9cb4 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_ALTERA_TSE) += altera/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
@@ -60,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c0f68dcd1dc1..7ae74d450e8f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -307,11 +307,6 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
return bfin_mdio_poll();
}
-static int bfin_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void bfin_mac_adjust_link(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
@@ -1040,6 +1035,7 @@ static struct ptp_clock_info bfin_ptp_caps = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = bfin_ptp_adjfreq,
.adjtime = bfin_ptp_adjtime,
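The new .n_pins field belongs to the programmable-pin support added to the PTP clock API in this cycle; clocks without external pins simply declare zero. A clock that did expose pins would, hypothetically, also provide a pin table and a verify callback alongside it:

	.n_pins		= 2,
	.pin_config	= example_pin_config,	/* array of struct ptp_pin_desc */
	.verify		= example_verify,	/* validates requested pin functions */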
@@ -1086,7 +1082,7 @@ static inline void _tx_reclaim_skb(void)
tx_list_head->desc_a.config &= ~DMAEN;
tx_list_head->status.status_word = 0;
if (tx_list_head->skb) {
- dev_kfree_skb(tx_list_head->skb);
+ dev_consume_skb_any(tx_list_head->skb);
tx_list_head->skb = NULL;
}
tx_list_head = tx_list_head->next;
@@ -1823,7 +1819,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev)
goto out_err_alloc;
miibus->read = bfin_mdiobus_read;
miibus->write = bfin_mdiobus_write;
- miibus->reset = bfin_mdiobus_reset;
miibus->parent = &pdev->dev;
miibus->name = "bfin_mii_bus";
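Dropping the empty .reset callbacks here and in the greth driver below is safe because the MDIO core only invokes the hook when one is installed; mdiobus_register() does, in essence:

	if (bus->reset)
		bus->reset(bus);

so a stub that merely returns 0 adds nothing.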
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c5d75e7aeeb6..23578dfee249 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1213,11 +1213,6 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
-static int greth_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void greth_link_change(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
@@ -1332,7 +1327,6 @@ static int greth_mdio_init(struct greth_private *greth)
snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
greth->mdio->read = greth_mdio_read;
greth->mdio->write = greth_mdio_write;
- greth->mdio->reset = greth_mdio_reset;
greth->mdio->priv = greth;
greth->mdio->irq = greth->mdio_irqs;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 511f6eecd58b..fcaeeb8a4929 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
new file mode 100644
index 000000000000..80c1ab74a4b8
--- /dev/null
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -0,0 +1,8 @@
+config ALTERA_TSE
+ tristate "Altera Triple-Speed Ethernet MAC support"
+ select PHYLIB
+ ---help---
+ This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
+
+ To compile this driver as a module, choose M here. The module
+ will be called alteratse.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
new file mode 100644
index 000000000000..d4a187e45369
--- /dev/null
+++ b/drivers/net/ethernet/altera/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Altera device drivers.
+#
+
+obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
+altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
+altera_msgdma.o altera_sgdma.o altera_utils.o
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
new file mode 100644
index 000000000000..3df18669ea30
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -0,0 +1,202 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/netdevice.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_msgdmahw.h"
+
+/* No initialization work to do for MSGDMA */
+int msgdma_initialize(struct altera_tse_private *priv)
+{
+ return 0;
+}
+
+void msgdma_uninitialize(struct altera_tse_private *priv)
+{
+}
+
+void msgdma_reset(struct altera_tse_private *priv)
+{
+ int counter;
+ struct msgdma_csr *txcsr =
+ (struct msgdma_csr *)priv->tx_dma_csr;
+ struct msgdma_csr *rxcsr =
+ (struct msgdma_csr *)priv->rx_dma_csr;
+
+ /* Reset Rx mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+ iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(&rxcsr->status,
+ MSGDMA_CSR_STAT_RESETTING))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+ netif_warn(priv, drv, priv->dev,
+ "TSE Rx mSGDMA resetting bit never cleared!\n");
+
+ /* clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+
+ /* Reset Tx mSGDMA */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+ iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(&txcsr->status,
+ MSGDMA_CSR_STAT_RESETTING))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+ netif_warn(priv, drv, priv->dev,
+ "TSE Tx mSGDMA resetting bit never cleared!\n");
+
+ /* clear all status bits */
+ iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+}
+
+void msgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_disable_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->rx_dma_csr;
+ iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+void msgdma_clear_txirq(struct altera_tse_private *priv)
+{
+ struct msgdma_csr *csr = priv->tx_dma_csr;
+ iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+/* return 0 to indicate transmit is pending */
+int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ struct msgdma_extended_desc *desc = priv->tx_dma_desc;
+
+ iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
+ iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
+ iowrite32(0, &desc->write_addr_lo);
+ iowrite32(0, &desc->write_addr_hi);
+ iowrite32(buffer->len, &desc->len);
+ iowrite32(0, &desc->burst_seq_num);
+ iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
+ iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+ return 0;
+}
+
+u32 msgdma_tx_completions(struct altera_tse_private *priv)
+{
+ u32 ready = 0;
+ u32 inuse;
+ u32 status;
+ struct msgdma_csr *txcsr =
+ (struct msgdma_csr *)priv->tx_dma_csr;
+
+ /* Get number of sent descriptors */
+ inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+
+ if (inuse) { /* Tx FIFO is not empty */
+ ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+ } else {
+ /* Check for buffered last packet */
+ status = ioread32(&txcsr->status);
+ if (status & MSGDMA_CSR_STAT_BUSY)
+ ready = priv->tx_prod - priv->tx_cons - 1;
+ else
+ ready = priv->tx_prod - priv->tx_cons;
+ }
+ return ready;
+}
+
+/* Put buffer to the mSGDMA RX FIFO
+ */
+int msgdma_add_rx_desc(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ struct msgdma_extended_desc *desc = priv->rx_dma_desc;
+ u32 len = priv->rx_dma_buf_sz;
+ dma_addr_t dma_addr = rxbuffer->dma_addr;
+ u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
+ | MSGDMA_DESC_CTL_END_ON_LEN
+ | MSGDMA_DESC_CTL_TR_COMP_IRQ
+ | MSGDMA_DESC_CTL_EARLY_IRQ
+ | MSGDMA_DESC_CTL_TR_ERR_IRQ
+ | MSGDMA_DESC_CTL_GO);
+
+ iowrite32(0, &desc->read_addr_lo);
+ iowrite32(0, &desc->read_addr_hi);
+ iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
+ iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
+ iowrite32(len, &desc->len);
+ iowrite32(0, &desc->burst_seq_num);
+ iowrite32(0x00010001, &desc->stride);
+ iowrite32(control, &desc->control);
+ return 1;
+}
+
+/* status is returned on upper 16 bits,
+ * length is returned in lower 16 bits
+ */
+u32 msgdma_rx_status(struct altera_tse_private *priv)
+{
+ u32 rxstatus = 0;
+ u32 pktlength;
+ u32 pktstatus;
+ struct msgdma_csr *rxcsr =
+ (struct msgdma_csr *)priv->rx_dma_csr;
+ struct msgdma_response *rxresp =
+ (struct msgdma_response *)priv->rx_dma_resp;
+
+ if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
+ pktlength = ioread32(&rxresp->bytes_transferred);
+ pktstatus = ioread32(&rxresp->status);
+ rxstatus = pktstatus;
+ rxstatus = rxstatus << 16;
+ rxstatus |= (pktlength & 0xffff);
+ }
+ return rxstatus;
+}
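As the comment above it states, msgdma_rx_status() packs the dispatcher's response status into the upper 16 bits and the received byte count into the lower 16. A caller on the receive path would be expected to split the value roughly like this (variable names are illustrative):

	u32 rxstatus = msgdma_rx_status(priv);
	u16 pktstatus = rxstatus >> 16;
	u16 pktlength = rxstatus & 0xffff;

	if ((pktstatus & MSGDMA_RESP_ERR_MASK) || pktlength == 0)
		;	/* transfer error or nothing received: drop and resubmit the buffer */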
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
new file mode 100644
index 000000000000..7f0f5bf2bba2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -0,0 +1,34 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMA_H__
+#define __ALTERA_MSGDMA_H__
+
+void msgdma_reset(struct altera_tse_private *);
+void msgdma_enable_txirq(struct altera_tse_private *);
+void msgdma_enable_rxirq(struct altera_tse_private *);
+void msgdma_disable_rxirq(struct altera_tse_private *);
+void msgdma_disable_txirq(struct altera_tse_private *);
+void msgdma_clear_rxirq(struct altera_tse_private *);
+void msgdma_clear_txirq(struct altera_tse_private *);
+u32 msgdma_tx_completions(struct altera_tse_private *);
+int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
+u32 msgdma_rx_status(struct altera_tse_private *);
+int msgdma_initialize(struct altera_tse_private *);
+void msgdma_uninitialize(struct altera_tse_private *);
+
+#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
new file mode 100644
index 000000000000..d7b59ba4019c
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -0,0 +1,167 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMAHW_H__
+#define __ALTERA_MSGDMAHW_H__
+
+/* mSGDMA standard descriptor format
+ */
+struct msgdma_desc {
+ u32 read_addr; /* data buffer source address */
+ u32 write_addr; /* data buffer destination address */
+ u32 len; /* the number of bytes to transfer per descriptor */
+ u32 control; /* characteristics of the transfer */
+};
+
+/* mSGDMA extended descriptor format
+ */
+struct msgdma_extended_desc {
+ u32 read_addr_lo; /* data buffer source address low bits */
+ u32 write_addr_lo; /* data buffer destination address low bits */
+ u32 len; /* the number of bytes to transfer
+ * per descriptor
+ */
+ u32 burst_seq_num; /* bit 31:24 write burst
+ * bit 23:16 read burst
+ * bit 15:0 sequence number
+ */
+ u32 stride; /* bit 31:16 write stride
+ * bit 15:0 read stride
+ */
+ u32 read_addr_hi; /* data buffer source address high bits */
+ u32 write_addr_hi; /* data buffer destination address high bits */
+ u32 control; /* characteristics of the transfer */
+};
+
+/* mSGDMA descriptor control field bit definitions
+ */
+#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
+#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
+#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
+#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
+#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
+#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
+#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
+#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
+#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
+#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16)
+#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
+/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the
+ * descriptor FIFO(s)
+ */
+#define MSGDMA_DESC_CTL_GO BIT(31)
+
+/* Tx buffer control flags
+ */
+#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
+ MSGDMA_DESC_CTL_GEN_EOP | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
+ MSGDMA_DESC_CTL_END_ON_LEN | \
+ MSGDMA_DESC_CTL_TR_COMP_IRQ | \
+ MSGDMA_DESC_CTL_EARLY_IRQ | \
+ MSGDMA_DESC_CTL_TR_ERR_IRQ | \
+ MSGDMA_DESC_CTL_GO)
+
+/* mSGDMA extended descriptor stride definitions
+ */
+#define MSGDMA_DESC_TX_STRIDE (0x00010001)
+#define MSGDMA_DESC_RX_STRIDE (0x00010001)
+
+/* mSGDMA dispatcher control and status register map
+ */
+struct msgdma_csr {
+ u32 status; /* Read/Clear */
+ u32 control; /* Read/Write */
+ u32 rw_fill_level; /* bit 31:16 - write fill level
+ * bit 15:0 - read fill level
+ */
+ u32 resp_fill_level; /* bit 15:0 */
+ u32 rw_seq_num; /* bit 31:16 - write sequence number
+ * bit 15:0 - read sequence number
+ */
+ u32 pad[3]; /* reserved */
+};
+
+/* mSGDMA CSR status register bit definitions
+ */
+#define MSGDMA_CSR_STAT_BUSY BIT(0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
+#define MSGDMA_CSR_STAT_STOPPED BIT(5)
+#define MSGDMA_CSR_STAT_RESETTING BIT(6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
+#define MSGDMA_CSR_STAT_IRQ BIT(9)
+#define MSGDMA_CSR_STAT_MASK 0x3FF
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ 0x1FF
+
+#define MSGDMA_CSR_STAT_BUSY_GET(v) GET_BIT_VALUE(v, 0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v) GET_BIT_VALUE(v, 2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v) GET_BIT_VALUE(v, 4)
+#define MSGDMA_CSR_STAT_STOPPED_GET(v) GET_BIT_VALUE(v, 5)
+#define MSGDMA_CSR_STAT_RESETTING_GET(v) GET_BIT_VALUE(v, 6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v) GET_BIT_VALUE(v, 7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v) GET_BIT_VALUE(v, 8)
+#define MSGDMA_CSR_STAT_IRQ_GET(v) GET_BIT_VALUE(v, 9)
+
+/* mSGDMA CSR control register bit definitions
+ */
+#define MSGDMA_CSR_CTL_STOP BIT(0)
+#define MSGDMA_CSR_CTL_RESET BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
+
+/* mSGDMA CSR fill level bits
+ */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
+
+/* mSGDMA response register map
+ */
+struct msgdma_response {
+ u32 bytes_transferred;
+ u32 status;
+};
+
+/* mSGDMA response register bit definitions
+ */
+#define MSGDMA_RESP_EARLY_TERM BIT(8)
+#define MSGDMA_RESP_ERR_MASK 0xFF
+
+#endif /* __ALTERA_MSGDMAHW_H__ */
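For orientation, the sketch below shows one way the dispatcher status word defined above might be decoded with the MSGDMA_CSR_STAT_* masks; the helper name and the idle test are illustrative only and are not part of this patch.

/* Illustrative only, not part of the patch: decode an mSGDMA dispatcher
 * status word using the masks above. Assumes the usual driver context
 * (linux/io.h for ioread32) and an __iomem pointer to the CSR block.
 */
static inline bool msgdma_example_idle(struct msgdma_csr __iomem *csr)
{
	u32 status = ioread32(&csr->status);

	/* Not busy and the descriptor buffer drained: nothing left to do */
	return !(status & MSGDMA_CSR_STAT_BUSY) &&
	       (status & MSGDMA_CSR_STAT_DESC_BUF_EMPTY);
}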
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 000000000000..0ee96639ae44
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,509 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/list.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdmahw.h"
+#include "altera_sgdma.h"
+
+static void sgdma_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed);
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static int sgdma_async_read(struct altera_tse_private *priv);
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc);
+
+static int sgdma_txbusy(struct altera_tse_private *priv);
+
+static int sgdma_rxbusy(struct altera_tse_private *priv);
+
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv);
+
+int sgdma_initialize(struct altera_tse_private *priv)
+{
+ priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+
+ priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+ SGDMA_CTRLREG_ILASTD;
+
+ INIT_LIST_HEAD(&priv->txlisthd);
+ INIT_LIST_HEAD(&priv->rxlisthd);
+
+ priv->rxdescphys = (dma_addr_t) 0;
+ priv->txdescphys = (dma_addr_t) 0;
+
+ priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+ priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(priv->device, priv->rxdescphys)) {
+ sgdma_uninitialize(priv);
+ netdev_err(priv->dev, "error mapping rx descriptor memory\n");
+ return -EINVAL;
+ }
+
+ priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+ priv->txdescmem, DMA_TO_DEVICE);
+
+ if (dma_mapping_error(priv->device, priv->txdescphys)) {
+ sgdma_uninitialize(priv);
+ netdev_err(priv->dev, "error mapping tx descriptor memory\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void sgdma_uninitialize(struct altera_tse_private *priv)
+{
+ if (priv->rxdescphys)
+ dma_unmap_single(priv->device, priv->rxdescphys,
+ priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+ if (priv->txdescphys)
+ dma_unmap_single(priv->device, priv->txdescphys,
+ priv->txdescmem, DMA_TO_DEVICE);
+}
+
+/* This function resets the SGDMA controller and clears the
+ * descriptor memory used for transmits and receives.
+ */
+void sgdma_reset(struct altera_tse_private *priv)
+{
+ u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+ u32 txdescriplen = priv->txdescmem;
+ u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+ u32 rxdescriplen = priv->rxdescmem;
+ struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
+ struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+
+ /* Initialize descriptor memory to 0 */
+ memset(ptxdescripmem, 0, txdescriplen);
+ memset(prxdescripmem, 0, rxdescriplen);
+
+ iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
+ iowrite32(0, &ptxsgdma->control);
+
+ iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
+ iowrite32(0, &prxsgdma->control);
+}
+
+void sgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+void sgdma_enable_txirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+/* SGDMA RX interrupts stay enabled once turned on; disabling is a no-op */
+void sgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+}
+
+/* SGDMA TX interrupts stay enabled once turned on; disabling is a no-op */
+void sgdma_disable_txirq(struct altera_tse_private *priv)
+{
+}
+
+void sgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+void sgdma_clear_txirq(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+ tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+/* Transmits a buffer through SGDMA. Returns the number of buffers
+ * transmitted, 0 if not possible.
+ *
+ * tx_lock is held by the caller
+ */
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ int pktstx = 0;
+ struct sgdma_descrip *descbase =
+ (struct sgdma_descrip *)priv->tx_dma_desc;
+
+ struct sgdma_descrip *cdesc = &descbase[0];
+ struct sgdma_descrip *ndesc = &descbase[1];
+
+ /* wait 'til the tx sgdma is ready for the next transmit request */
+ if (sgdma_txbusy(priv))
+ return 0;
+
+ sgdma_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_txphysaddr(priv, ndesc),
+ buffer->dma_addr, /* address of packet to xmit */
+ 0, /* write addr 0 for tx dma */
+ buffer->len, /* length of packet */
+ SGDMA_CONTROL_EOP, /* Generate EOP */
+ 0, /* read fixed */
+		      SGDMA_CONTROL_WR_FIXED); /* write fixed */
+
+ pktstx = sgdma_async_write(priv, cdesc);
+
+ /* enqueue the request to the pending transmit queue */
+ queue_tx(priv, buffer);
+
+ return 1;
+}
+
+
+/* tx_lock held to protect access to queued tx list
+ */
+u32 sgdma_tx_completions(struct altera_tse_private *priv)
+{
+ u32 ready = 0;
+ struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+
+ if (!sgdma_txbusy(priv) &&
+ ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+ (dequeue_tx(priv))) {
+ ready = 1;
+ }
+
+ return ready;
+}
+
+int sgdma_add_rx_desc(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ queue_rx(priv, rxbuffer);
+ return sgdma_async_read(priv);
+}
+
+/* status is returned in the upper 16 bits,
+ * length is returned in the lower 16 bits
+ */
+u32 sgdma_rx_status(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+ struct sgdma_descrip *desc = NULL;
+ int pktsrx;
+ unsigned int rxstatus = 0;
+ unsigned int pktlength = 0;
+ unsigned int pktstatus = 0;
+ struct tse_buffer *rxbuffer = NULL;
+
+ dma_sync_single_for_cpu(priv->device,
+ priv->rxdescphys,
+ priv->rxdescmem,
+ DMA_BIDIRECTIONAL);
+
+ desc = &base[0];
+ if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
+ (desc->status & SGDMA_STATUS_EOP)) {
+ pktlength = desc->bytes_xferred;
+ pktstatus = desc->status & 0x3f;
+ rxstatus = pktstatus;
+ rxstatus = rxstatus << 16;
+ rxstatus |= (pktlength & 0xffff);
+
+ desc->status = 0;
+
+ rxbuffer = dequeue_rx(priv);
+ if (rxbuffer == NULL)
+ netdev_err(priv->dev,
+ "sgdma rx and rx queue empty!\n");
+
+ /* kick the rx sgdma after reaping this descriptor */
+ pktsrx = sgdma_async_read(priv);
+ }
+
+ return rxstatus;
+}
+
+
+/* Private functions */
+static void sgdma_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed)
+{
+ /* Clear the next descriptor as not owned by hardware */
+ u32 ctrl = ndesc->control;
+ ctrl &= ~SGDMA_CONTROL_HW_OWNED;
+ ndesc->control = ctrl;
+
+ ctrl = 0;
+ ctrl = SGDMA_CONTROL_HW_OWNED;
+ ctrl |= generate_eop;
+ ctrl |= rfixed;
+ ctrl |= wfixed;
+
+ /* Channel is implicitly zero, initialized to 0 by default */
+
+ desc->raddr = raddr;
+ desc->waddr = waddr;
+ desc->next = lower_32_bits(ndesc_phys);
+ desc->control = ctrl;
+ desc->status = 0;
+ desc->rburst = 0;
+ desc->wburst = 0;
+ desc->bytes = length;
+ desc->bytes_xferred = 0;
+}
+
+/* If the hardware is busy, don't restart the async read.
+ * If the status register is 0, meaning the initial state, restart the async
+ * read, typically when populating a receive buffer for the first time.
+ * If the read status indicates not busy and some status bits are set,
+ * restart the async DMA read.
+ */
+static int sgdma_async_read(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ struct sgdma_descrip *descbase =
+ (struct sgdma_descrip *)priv->rx_dma_desc;
+
+ struct sgdma_descrip *cdesc = &descbase[0];
+ struct sgdma_descrip *ndesc = &descbase[1];
+
+ unsigned int sts = ioread32(&csr->status);
+ struct tse_buffer *rxbuffer = NULL;
+
+ if (!sgdma_rxbusy(priv)) {
+ rxbuffer = queue_rx_peekhead(priv);
+ if (rxbuffer == NULL)
+ return 0;
+
+ sgdma_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_rxphysaddr(priv, ndesc),
+ 0, /* read addr 0 for rx dma */
+ rxbuffer->dma_addr, /* write addr for rx dma */
+ 0, /* read 'til EOP */
+ 0, /* EOP: NA for rx dma */
+ 0, /* read fixed: NA for rx dma */
+			      0);	/* write fixed: NA for rx dma */
+
+ /* clear control and status */
+ iowrite32(0, &csr->control);
+
+ /* If status available, clear those bits */
+ if (sts & 0xf)
+ iowrite32(0xf, &csr->status);
+
+ dma_sync_single_for_device(priv->device,
+ priv->rxdescphys,
+ priv->rxdescmem,
+ DMA_BIDIRECTIONAL);
+
+ iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+ &csr->next_descrip);
+
+ iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+ &csr->control);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+ if (sgdma_txbusy(priv))
+ return 0;
+
+ /* clear control and status */
+ iowrite32(0, &csr->control);
+ iowrite32(0x1f, &csr->status);
+
+ dma_sync_single_for_device(priv->device, priv->txdescphys,
+ priv->txdescmem, DMA_TO_DEVICE);
+
+ iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+ &csr->next_descrip);
+
+ iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
+ &csr->control);
+
+ return 1;
+}
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ dma_addr_t paddr = priv->txdescmem_busaddr;
+ uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
+ return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+ struct sgdma_descrip *desc)
+{
+ dma_addr_t paddr = priv->rxdescmem_busaddr;
+ uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
+ return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+#define list_remove_head(list, entry, type, member) \
+ do { \
+ entry = NULL; \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ list_del_init(&entry->member); \
+ } \
+ } while (0)
+
+#define list_peek_head(list, entry, type, member) \
+ do { \
+ entry = NULL; \
+ if (!list_empty(list)) { \
+ entry = list_entry((list)->next, type, member); \
+ } \
+ } while (0)
+
+/* adds a tse_buffer to the tail of a tx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ list_add_tail(&buffer->lh, &priv->txlisthd);
+}
+
+
+/* adds a tse_buffer to the tail of a rx buffer list
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+ list_add_tail(&buffer->lh, &priv->rxlisthd);
+}
+
+/* dequeues a tse_buffer from the transmit buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list while the
+ * head is being examined.
+ */
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv)
+{
+ struct tse_buffer *buffer = NULL;
+ list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+ return buffer;
+}
+
+/* check and return rx sgdma status without polling
+ */
+static int sgdma_rxbusy(struct altera_tse_private *priv)
+{
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+ return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+}
+
+/* waits for the tx sgdma to finish its current operation, returns 0
+ * when it transitions to nonbusy, returns 1 if the operation times out
+ */
+static int sgdma_txbusy(struct altera_tse_private *priv)
+{
+ int delay = 0;
+ struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+	/* if DMA is busy, wait for the current transaction to finish */
+ while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+ udelay(1);
+
+ if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+ netdev_err(priv->dev, "timeout waiting for tx dma\n");
+ return 1;
+ }
+ return 0;
+}
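sgdma_rx_status() above packs the descriptor status into the upper 16 bits of its return value and the byte count into the lower 16 bits. A minimal sketch of how a caller unpacks it, mirroring tse_rx() in altera_tse_main.c; the helper itself is illustrative and not part of the patch:

/* Illustrative only: unpack the value returned by sgdma_rx_status(). */
static inline void sgdma_example_unpack(u32 rxstatus, u16 *pktstatus,
					u16 *pktlength)
{
	*pktstatus = rxstatus >> 16;	/* error/EOP bits from desc->status */
	*pktlength = rxstatus & 0xffff;	/* bytes transferred by the DMA */
}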
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 000000000000..07d471729dc4
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMA_H__
+#define __ALTERA_SGDMA_H__
+
+void sgdma_reset(struct altera_tse_private *);
+void sgdma_enable_txirq(struct altera_tse_private *);
+void sgdma_enable_rxirq(struct altera_tse_private *);
+void sgdma_disable_rxirq(struct altera_tse_private *);
+void sgdma_disable_txirq(struct altera_tse_private *);
+void sgdma_clear_rxirq(struct altera_tse_private *);
+void sgdma_clear_txirq(struct altera_tse_private *);
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_tx_completions(struct altera_tse_private *);
+int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_status(struct altera_tse_private *);
+u32 sgdma_rx_status(struct altera_tse_private *);
+int sgdma_initialize(struct altera_tse_private *);
+void sgdma_uninitialize(struct altera_tse_private *);
+
+#endif /* __ALTERA_SGDMA_H__ */
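These entry points are consumed through the altera_dmaops function-pointer table declared in altera_tse.h. Below is a hedged sketch of how such a table could be populated for the SGDMA variant; the variable name and the dmamask value are assumptions for illustration, not taken from this patch.

/* Hypothetical wiring of the SGDMA entry points into struct altera_dmaops */
static struct altera_dmaops example_sgdma_ops = {
	.altera_dtype	= ALTERA_DTYPE_SGDMA,
	.dmamask	= 32,			/* assumed 32-bit DMA mask */
	.reset_dma	= sgdma_reset,
	.enable_txirq	= sgdma_enable_txirq,
	.enable_rxirq	= sgdma_enable_rxirq,
	.disable_txirq	= sgdma_disable_txirq,
	.disable_rxirq	= sgdma_disable_rxirq,
	.clear_txirq	= sgdma_clear_txirq,
	.clear_rxirq	= sgdma_clear_rxirq,
	.tx_buffer	= sgdma_tx_buffer,
	.tx_completions	= sgdma_tx_completions,
	.add_rx_desc	= sgdma_add_rx_desc,
	.get_rx_status	= sgdma_rx_status,
	.init_dma	= sgdma_initialize,
	.uninit_dma	= sgdma_uninitialize,
};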
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 000000000000..ba3334f35383
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMAHW_H__
+#define __ALTERA_SGDMAHW_H__
+
+/* SGDMA descriptor structure */
+struct sgdma_descrip {
+ unsigned int raddr; /* address of data to be read */
+ unsigned int pad1;
+ unsigned int waddr;
+ unsigned int pad2;
+ unsigned int next;
+ unsigned int pad3;
+ unsigned short bytes;
+ unsigned char rburst;
+ unsigned char wburst;
+ unsigned short bytes_xferred; /* 16 bits, bytes xferred */
+
+ /* bit 0: error
+ * bit 1: length error
+ * bit 2: crc error
+ * bit 3: truncated error
+ * bit 4: phy error
+ * bit 5: collision error
+ * bit 6: reserved
+ * bit 7: status eop for recv case
+ */
+ unsigned char status;
+
+ /* bit 0: eop
+ * bit 1: read_fixed
+ * bit 2: write fixed
+ * bits 3,4,5,6: Channel (always 0)
+ * bit 7: hardware owned
+ */
+ unsigned char control;
+} __packed;
+
+
+#define SGDMA_STATUS_ERR BIT(0)
+#define SGDMA_STATUS_LENGTH_ERR BIT(1)
+#define SGDMA_STATUS_CRC_ERR BIT(2)
+#define SGDMA_STATUS_TRUNC_ERR BIT(3)
+#define SGDMA_STATUS_PHY_ERR BIT(4)
+#define SGDMA_STATUS_COLL_ERR BIT(5)
+#define SGDMA_STATUS_EOP BIT(7)
+
+#define SGDMA_CONTROL_EOP BIT(0)
+#define SGDMA_CONTROL_RD_FIXED BIT(1)
+#define SGDMA_CONTROL_WR_FIXED BIT(2)
+
+/* Channel is always 0, so just zero initialize it */
+
+#define SGDMA_CONTROL_HW_OWNED BIT(7)
+
+/* SGDMA register space */
+struct sgdma_csr {
+ /* bit 0: error
+ * bit 1: eop
+ * bit 2: descriptor completed
+ * bit 3: chain completed
+ * bit 4: busy
+ * remainder reserved
+ */
+ u32 status;
+ u32 pad1[3];
+
+ /* bit 0: interrupt on error
+ * bit 1: interrupt on eop
+ * bit 2: interrupt after every descriptor
+ * bit 3: interrupt after last descrip in a chain
+ * bit 4: global interrupt enable
+ * bit 5: starts descriptor processing
+ * bit 6: stop core on dma error
+ * bit 7: interrupt on max descriptors
+ * bits 8-15: max descriptors to generate interrupt
+ * bit 16: Software reset
+ * bit 17: clears owned by hardware if 0, does not clear otherwise
+ * bit 18: enables descriptor polling mode
+ * bit 19-26: clocks before polling again
+ * bit 27-30: reserved
+ * bit 31: clear interrupt
+ */
+ u32 control;
+ u32 pad2[3];
+ u32 next_descrip;
+ u32 pad3[3];
+};
+
+
+#define SGDMA_STSREG_ERR BIT(0) /* Error */
+#define SGDMA_STSREG_EOP BIT(1) /* EOP */
+#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */
+#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */
+#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */
+
+#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */
+#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */
+#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */
+#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */
+#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */
+#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */
+#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */
+#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */
+#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */
+#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */
+#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */
+#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */
+
+#endif /* __ALTERA_SGDMAHW_H__ */
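To make the control byte layout concrete, the sketch below composes the value sgdma_descrip() ends up writing for a single-fragment transmit. Interpreting WR_FIXED as the streaming write port is an assumption based on the zero write address used on the TX path; the helper is not part of the patch.

/* Illustrative only: the control byte for a single-fragment TX descriptor */
static inline u8 sgdma_example_tx_control(void)
{
	return SGDMA_CONTROL_HW_OWNED |	/* hand the descriptor to hardware */
	       SGDMA_CONTROL_EOP |	/* only fragment, so end of packet */
	       SGDMA_CONTROL_WR_FIXED;	/* fixed write address (assumed FIFO port) */
}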
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
new file mode 100644
index 000000000000..8feeed05de0e
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -0,0 +1,486 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ * Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_TSE_H__
+#define __ALTERA_TSE_H__
+
+#define ALTERA_TSE_RESOURCE_NAME "altera_tse"
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
+#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
+ * bytes
+ */
+/* Rx FIFO default settings */
+#define ALTERA_TSE_RX_SECTION_EMPTY 16
+#define ALTERA_TSE_RX_SECTION_FULL 0
+#define ALTERA_TSE_RX_ALMOST_EMPTY 8
+#define ALTERA_TSE_RX_ALMOST_FULL 8
+
+/* Tx FIFO default settings */
+#define ALTERA_TSE_TX_SECTION_EMPTY 16
+#define ALTERA_TSE_TX_SECTION_FULL 0
+#define ALTERA_TSE_TX_ALMOST_EMPTY 8
+#define ALTERA_TSE_TX_ALMOST_FULL 3
+
+/* MAC function configuration default settings */
+#define ALTERA_TSE_TX_IPG_LENGTH 12
+
+#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
+
+/* MAC Command_Config Register Bit Definitions
+ */
+#define MAC_CMDCFG_TX_ENA BIT(0)
+#define MAC_CMDCFG_RX_ENA BIT(1)
+#define MAC_CMDCFG_XON_GEN BIT(2)
+#define MAC_CMDCFG_ETH_SPEED BIT(3)
+#define MAC_CMDCFG_PROMIS_EN BIT(4)
+#define MAC_CMDCFG_PAD_EN BIT(5)
+#define MAC_CMDCFG_CRC_FWD BIT(6)
+#define MAC_CMDCFG_PAUSE_FWD BIT(7)
+#define MAC_CMDCFG_PAUSE_IGNORE BIT(8)
+#define MAC_CMDCFG_TX_ADDR_INS BIT(9)
+#define MAC_CMDCFG_HD_ENA BIT(10)
+#define MAC_CMDCFG_EXCESS_COL BIT(11)
+#define MAC_CMDCFG_LATE_COL BIT(12)
+#define MAC_CMDCFG_SW_RESET BIT(13)
+#define MAC_CMDCFG_MHASH_SEL BIT(14)
+#define MAC_CMDCFG_LOOP_ENA BIT(15)
+#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16)
+#define MAC_CMDCFG_MAGIC_ENA BIT(19)
+#define MAC_CMDCFG_SLEEP BIT(20)
+#define MAC_CMDCFG_WAKEUP BIT(21)
+#define MAC_CMDCFG_XOFF_GEN BIT(22)
+#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23)
+#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24)
+#define MAC_CMDCFG_ENA_10 BIT(25)
+#define MAC_CMDCFG_RX_ERR_DISC BIT(26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27)
+#define MAC_CMDCFG_CNT_RESET BIT(31)
+
+#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0)
+#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1)
+#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2)
+#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3)
+#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4)
+#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5)
+#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6)
+#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7)
+#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8)
+#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9)
+#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10)
+#define MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11)
+#define MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12)
+#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13)
+#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14)
+#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15)
+#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7)
+#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19)
+#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20)
+#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21)
+#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22)
+#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23)
+#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24)
+#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25)
+#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
+#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
+
+/* MDIO registers within MAC register Space
+ */
+struct altera_tse_mdio {
+ u32 control; /* PHY device operation control register */
+ u32 status; /* PHY device operation status register */
+ u32 phy_id1; /* Bits 31:16 of PHY identifier */
+ u32 phy_id2; /* Bits 15:0 of PHY identifier */
+ u32 auto_negotiation_advertisement; /* Auto-negotiation
+ * advertisement
+ * register
+ */
+ u32 remote_partner_base_page_ability;
+
+ u32 reg6;
+ u32 reg7;
+ u32 reg8;
+ u32 reg9;
+ u32 rega;
+ u32 regb;
+ u32 regc;
+ u32 regd;
+ u32 rege;
+ u32 regf;
+ u32 reg10;
+ u32 reg11;
+ u32 reg12;
+ u32 reg13;
+ u32 reg14;
+ u32 reg15;
+ u32 reg16;
+ u32 reg17;
+ u32 reg18;
+ u32 reg19;
+ u32 reg1a;
+ u32 reg1b;
+ u32 reg1c;
+ u32 reg1d;
+ u32 reg1e;
+ u32 reg1f;
+};
+
+/* MAC register Space. Note that some of these registers may or may not be
+ * present depending upon options chosen by the user when the core was
+ * configured and built. Please consult the Altera Triple Speed Ethernet User
+ * Guide for details.
+ */
+struct altera_tse_mac {
+ /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer
+ * specific revision
+ */
+ u32 megacore_revision;
+ /* Provides a memory location for user applications to test the device
+ * memory operation.
+ */
+ u32 scratch_pad;
+ /* The host processor uses this register to control and configure the
+ * MAC block
+ */
+ u32 command_config;
+ /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary
+ * MAC address
+ */
+ u32 mac_addr_0;
+ /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary
+ * MAC address
+ */
+ u32 mac_addr_1;
+	/* 14-bit maximum frame length. The MAC receive logic flags frames
+	 * longer than this value as oversized (see ether_stats_oversize_pkts)
+	 */
+ u32 frm_length;
+ /* The pause quanta is used in each pause frame sent to a remote
+ * Ethernet device, in increments of 512 Ethernet bit times
+ */
+ u32 pause_quanta;
+ /* 12-bit receive FIFO section-empty threshold */
+ u32 rx_section_empty;
+ /* 12-bit receive FIFO section-full threshold */
+ u32 rx_section_full;
+ /* 12-bit transmit FIFO section-empty threshold */
+ u32 tx_section_empty;
+ /* 12-bit transmit FIFO section-full threshold */
+ u32 tx_section_full;
+ /* 12-bit receive FIFO almost-empty threshold */
+ u32 rx_almost_empty;
+ /* 12-bit receive FIFO almost-full threshold */
+ u32 rx_almost_full;
+ /* 12-bit transmit FIFO almost-empty threshold */
+ u32 tx_almost_empty;
+ /* 12-bit transmit FIFO almost-full threshold */
+ u32 tx_almost_full;
+ /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */
+ u32 mdio_phy0_addr;
+ /* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */
+ u32 mdio_phy1_addr;
+
+	/* Bit[15:0]: 16-bit holdoff quanta */
+ u32 holdoff_quant;
+
+ /* only if 100/1000 BaseX PCS, reserved otherwise */
+ u32 reserved1[5];
+
+ /* Minimum IPG between consecutive transmit frame in terms of bytes */
+ u32 tx_ipg_length;
+
+ /* IEEE 802.3 oEntity Managed Object Support */
+
+ /* The MAC addresses */
+ u32 mac_id_1;
+ u32 mac_id_2;
+
+ /* Number of frames transmitted without error including pause frames */
+ u32 frames_transmitted_ok;
+ /* Number of frames received without error including pause frames */
+ u32 frames_received_ok;
+ /* Number of frames received with a CRC error */
+ u32 frames_check_sequence_errors;
+ /* Frame received with an alignment error */
+ u32 alignment_errors;
+ /* Sum of payload and padding octets of frames transmitted without
+ * error
+ */
+ u32 octets_transmitted_ok;
+ /* Sum of payload and padding octets of frames received without error */
+ u32 octets_received_ok;
+
+ /* IEEE 802.3 oPausedEntity Managed Object Support */
+
+ /* Number of transmitted pause frames */
+ u32 tx_pause_mac_ctrl_frames;
+ /* Number of Received pause frames */
+ u32 rx_pause_mac_ctrl_frames;
+
+ /* IETF MIB (MIB-II) Object Support */
+
+ /* Number of frames received with error */
+ u32 if_in_errors;
+ /* Number of frames transmitted with error */
+ u32 if_out_errors;
+ /* Number of valid received unicast frames */
+ u32 if_in_ucast_pkts;
+	/* Number of valid received multicast frames (without pause) */
+ u32 if_in_multicast_pkts;
+ /* Number of valid received broadcast frames */
+ u32 if_in_broadcast_pkts;
+ u32 if_out_discards;
+ /* The number of valid unicast frames transmitted */
+ u32 if_out_ucast_pkts;
+ /* The number of valid multicast frames transmitted,
+ * excluding pause frames
+ */
+ u32 if_out_multicast_pkts;
+ u32 if_out_broadcast_pkts;
+
+ /* IETF RMON MIB Object Support */
+
+ /* Counts the number of dropped packets due to internal errors
+ * of the MAC client.
+ */
+ u32 ether_stats_drop_events;
+ /* Total number of bytes received. Good and bad frames. */
+ u32 ether_stats_octets;
+ /* Total number of packets received. Counts good and bad packets. */
+ u32 ether_stats_pkts;
+ /* Number of packets received with less than 64 bytes. */
+ u32 ether_stats_undersize_pkts;
+ /* The number of frames received that are longer than the
+ * value configured in the frm_length register
+ */
+ u32 ether_stats_oversize_pkts;
+ /* Number of received packet with 64 bytes */
+ u32 ether_stats_pkts_64_octets;
+ /* Frames (good and bad) with 65 to 127 bytes */
+ u32 ether_stats_pkts_65to127_octets;
+ /* Frames (good and bad) with 128 to 255 bytes */
+ u32 ether_stats_pkts_128to255_octets;
+ /* Frames (good and bad) with 256 to 511 bytes */
+ u32 ether_stats_pkts_256to511_octets;
+ /* Frames (good and bad) with 512 to 1023 bytes */
+ u32 ether_stats_pkts_512to1023_octets;
+ /* Frames (good and bad) with 1024 to 1518 bytes */
+ u32 ether_stats_pkts_1024to1518_octets;
+
+ /* Any frame length from 1519 to the maximum length configured in the
+ * frm_length register, if it is greater than 1518
+ */
+ u32 ether_stats_pkts_1519tox_octets;
+ /* Too long frames with CRC error */
+ u32 ether_stats_jabbers;
+ /* Too short frames with CRC error */
+ u32 ether_stats_fragments;
+
+ u32 reserved2;
+
+ /* FIFO control register */
+ u32 tx_cmd_stat;
+ u32 rx_cmd_stat;
+
+ /* Extended Statistics Counters */
+ u32 msb_octets_transmitted_ok;
+ u32 msb_octets_received_ok;
+ u32 msb_ether_stats_octets;
+
+ u32 reserved3;
+
+ /* Multicast address resolution table, mapped in the controller address
+ * space
+ */
+ u32 hash_table[64];
+
+ /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY
+ * management interface
+ */
+ struct altera_tse_mdio mdio_phy0;
+ struct altera_tse_mdio mdio_phy1;
+
+ /* 4 Supplemental MAC Addresses */
+ u32 supp_mac_addr_0_0;
+ u32 supp_mac_addr_0_1;
+ u32 supp_mac_addr_1_0;
+ u32 supp_mac_addr_1_1;
+ u32 supp_mac_addr_2_0;
+ u32 supp_mac_addr_2_1;
+ u32 supp_mac_addr_3_0;
+ u32 supp_mac_addr_3_1;
+
+ u32 reserved4[8];
+
+ /* IEEE 1588v2 Feature */
+ u32 tx_period;
+ u32 tx_adjust_fns;
+ u32 tx_adjust_ns;
+ u32 rx_period;
+ u32 rx_adjust_fns;
+ u32 rx_adjust_ns;
+
+ u32 reserved5[42];
+};
+
+/* Transmit and Receive Command Registers Bit Definitions
+ */
+#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
+#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18)
+#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25)
+
+/* Wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct tse_buffer {
+ struct list_head lh;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 len;
+ int mapped_as_page;
+};
+
+struct altera_tse_private;
+
+#define ALTERA_DTYPE_SGDMA 1
+#define ALTERA_DTYPE_MSGDMA 2
+
+/* standard DMA interface for SGDMA and MSGDMA */
+struct altera_dmaops {
+ int altera_dtype;
+ int dmamask;
+ void (*reset_dma)(struct altera_tse_private *);
+ void (*enable_txirq)(struct altera_tse_private *);
+ void (*enable_rxirq)(struct altera_tse_private *);
+ void (*disable_txirq)(struct altera_tse_private *);
+ void (*disable_rxirq)(struct altera_tse_private *);
+ void (*clear_txirq)(struct altera_tse_private *);
+ void (*clear_rxirq)(struct altera_tse_private *);
+ int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
+ u32 (*tx_completions)(struct altera_tse_private *);
+ int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+ u32 (*get_rx_status)(struct altera_tse_private *);
+ int (*init_dma)(struct altera_tse_private *);
+ void (*uninit_dma)(struct altera_tse_private *);
+};
+
+/* This structure is private to each device.
+ */
+struct altera_tse_private {
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ /* MAC address space */
+ struct altera_tse_mac __iomem *mac_dev;
+
+ /* TSE Revision */
+ u32 revision;
+
+ /* mSGDMA Rx Dispatcher address space */
+ void __iomem *rx_dma_csr;
+ void __iomem *rx_dma_desc;
+ void __iomem *rx_dma_resp;
+
+ /* mSGDMA Tx Dispatcher address space */
+ void __iomem *tx_dma_csr;
+ void __iomem *tx_dma_desc;
+
+ /* Rx buffers queue */
+ struct tse_buffer *rx_ring;
+ u32 rx_cons;
+ u32 rx_prod;
+ u32 rx_ring_size;
+ u32 rx_dma_buf_sz;
+
+ /* Tx ring buffer */
+ struct tse_buffer *tx_ring;
+ u32 tx_prod;
+ u32 tx_cons;
+ u32 tx_ring_size;
+
+ /* Interrupts */
+ u32 tx_irq;
+ u32 rx_irq;
+
+ /* RX/TX MAC FIFO configs */
+ u32 tx_fifo_depth;
+ u32 rx_fifo_depth;
+ u32 max_mtu;
+
+ /* Hash filter settings */
+ u32 hash_filter;
+ u32 added_unicast;
+
+ /* Descriptor memory info for managing SGDMA */
+ u32 txdescmem;
+ u32 rxdescmem;
+ dma_addr_t rxdescmem_busaddr;
+ dma_addr_t txdescmem_busaddr;
+ u32 txctrlreg;
+ u32 rxctrlreg;
+ dma_addr_t rxdescphys;
+ dma_addr_t txdescphys;
+
+ struct list_head txlisthd;
+ struct list_head rxlisthd;
+
+ /* MAC command_config register protection */
+ spinlock_t mac_cfg_lock;
+ /* Tx path protection */
+ spinlock_t tx_lock;
+ /* Rx DMA & interrupt control protection */
+ spinlock_t rxdma_irq_lock;
+
+ /* PHY */
+ int phy_addr; /* PHY's MDIO address, -1 for autodetection */
+ phy_interface_t phy_iface;
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* ethtool msglvl option */
+ u32 msg_enable;
+
+ struct altera_dmaops *dmaops;
+};
+
+/* Function prototypes
+ */
+void altera_tse_set_ethtool_ops(struct net_device *);
+
+#endif /* __ALTERA_TSE_H__ */
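The MAC_CMDCFG_* masks above are applied with read-modify-write cycles on command_config. A minimal sketch, assuming the usual ioread32/iowrite32 accessors and a mapped altera_tse_mac block; the helper is illustrative and not part of the patch:

/* Illustrative only: enable the MAC transmit and receive paths */
static void example_enable_mac(struct altera_tse_mac __iomem *mac)
{
	u32 cfg = ioread32(&mac->command_config);

	cfg |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	iowrite32(cfg, &mac->command_config);
}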
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
new file mode 100644
index 000000000000..319ca74f5e74
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -0,0 +1,235 @@
+/* Ethtool support for Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "altera_tse.h"
+
+#define TSE_STATS_LEN 31
+#define TSE_NUM_REGS 128
+
+static char const stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "rx_crc_errors",
+ "rx_align_errors",
+ "tx_bytes",
+ "rx_bytes",
+ "tx_pause",
+ "rx_pause",
+ "rx_errors",
+ "tx_errors",
+ "rx_unicast",
+ "rx_multicast",
+ "rx_broadcast",
+ "tx_discards",
+ "tx_unicast",
+ "tx_multicast",
+ "tx_broadcast",
+ "ether_drops",
+ "rx_total_bytes",
+ "rx_total_packets",
+ "rx_undersize",
+ "rx_oversize",
+ "rx_64_bytes",
+ "rx_65_127_bytes",
+ "rx_128_255_bytes",
+ "rx_256_511_bytes",
+ "rx_512_1023_bytes",
+ "rx_1024_1518_bytes",
+ "rx_gte_1519_bytes",
+ "rx_jabbers",
+ "rx_runts",
+};
+
+static void tse_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ u32 rev = ioread32(&priv->mac_dev->megacore_revision);
+
+ strcpy(info->driver, "Altera TSE MAC IP Driver");
+ strcpy(info->version, "v8.0");
+ snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
+ rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
+ sprintf(info->bus_info, "platform");
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats
+ */
+static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u64 ext;
+
+ buf[0] = ioread32(&mac->frames_transmitted_ok);
+ buf[1] = ioread32(&mac->frames_received_ok);
+ buf[2] = ioread32(&mac->frames_check_sequence_errors);
+ buf[3] = ioread32(&mac->alignment_errors);
+
+ /* Extended aOctetsTransmittedOK counter */
+ ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
+ ext |= ioread32(&mac->octets_transmitted_ok);
+ buf[4] = ext;
+
+ /* Extended aOctetsReceivedOK counter */
+ ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
+ ext |= ioread32(&mac->octets_received_ok);
+ buf[5] = ext;
+
+ buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
+ buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
+ buf[8] = ioread32(&mac->if_in_errors);
+ buf[9] = ioread32(&mac->if_out_errors);
+ buf[10] = ioread32(&mac->if_in_ucast_pkts);
+ buf[11] = ioread32(&mac->if_in_multicast_pkts);
+ buf[12] = ioread32(&mac->if_in_broadcast_pkts);
+ buf[13] = ioread32(&mac->if_out_discards);
+ buf[14] = ioread32(&mac->if_out_ucast_pkts);
+ buf[15] = ioread32(&mac->if_out_multicast_pkts);
+ buf[16] = ioread32(&mac->if_out_broadcast_pkts);
+ buf[17] = ioread32(&mac->ether_stats_drop_events);
+
+ /* Extended etherStatsOctets counter */
+ ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
+ ext |= ioread32(&mac->ether_stats_octets);
+ buf[18] = ext;
+
+ buf[19] = ioread32(&mac->ether_stats_pkts);
+ buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
+ buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
+ buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
+ buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
+ buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
+ buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
+ buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
+ buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
+ buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
+ buf[29] = ioread32(&mac->ether_stats_jabbers);
+ buf[30] = ioread32(&mac->ether_stats_fragments);
+}
+
+static int tse_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return TSE_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tse_get_msglevel(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void tse_set_msglevel(struct net_device *dev, uint32_t data)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ priv->msg_enable = data;
+}
+
+static int tse_reglen(struct net_device *dev)
+{
+ return TSE_NUM_REGS * sizeof(u32);
+}
+
+static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ int i;
+ struct altera_tse_private *priv = netdev_priv(dev);
+ u32 *tse_mac_regs = (u32 *)priv->mac_dev;
+ u32 *buf = regbuf;
+
+ /* Set version to a known value, so ethtool knows
+ * how to do any special formatting of this data.
+ * This version number will need to change if and
+ * when this register table is changed.
+ */
+
+ regs->version = 1;
+
+ for (i = 0; i < TSE_NUM_REGS; i++)
+ buf[i] = ioread32(&tse_mac_regs[i]);
+}
+
+static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (phydev == NULL)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (phydev == NULL)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops tse_ethtool_ops = {
+ .get_drvinfo = tse_get_drvinfo,
+ .get_regs_len = tse_reglen,
+ .get_regs = tse_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_settings = tse_get_settings,
+ .set_settings = tse_set_settings,
+ .get_strings = tse_gstrings,
+ .get_sset_count = tse_sset_count,
+ .get_ethtool_stats = tse_fill_stats,
+ .get_msglevel = tse_get_msglevel,
+ .set_msglevel = tse_set_msglevel,
+};
+
+void altera_tse_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+}
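The three stats callbacks above are matched by position: tse_sset_count() reports TSE_STATS_LEN, tse_gstrings() copies stat_gstrings, and tse_fill_stats() fills the u64 buffer in the same order, which is what `ethtool -S` presents. A compile-time check like the sketch below (not part of the patch) would make that coupling explicit:

/* Illustrative only: assert that the string table and TSE_STATS_LEN agree */
static inline void tse_example_stats_check(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(stat_gstrings) != TSE_STATS_LEN);
}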
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
new file mode 100644
index 000000000000..c70a29e0b9f7
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -0,0 +1,1543 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ * Dalon Westergreen
+ * Thomas Chou
+ * Ian Abbott
+ * Yuriy Kozlov
+ * Tobias Klauser
+ * Andriy Smolskyy
+ * Roman Bulgakov
+ * Dmytro Mytarchuk
+ * Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <asm/cacheflush.h>
+
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdma.h"
+#include "altera_msgdma.h"
+
+static atomic_t instance_count = ATOMIC_INIT(~0);
+/* Module parameters */
+static int debug = -1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
+
+#define RX_DESCRIPTORS 64
+static int dma_rx_num = RX_DESCRIPTORS;
+module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
+
+#define TX_DESCRIPTORS 64
+static int dma_tx_num = TX_DESCRIPTORS;
+module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
+
+
+#define POLL_PHY (-1)
+
+/* Make sure the DMA buffer size is larger than the max frame size
+ * plus some alignment offset and a VLAN header. With a max frame size of
+ * 1518, a VLAN header adds 4 bytes and alignment headroom adds 2 bytes,
+ * so 2048 is ample.
+ */
+#define ALTERA_RXDMABUFFER_SIZE 2048
+
+/* Allow the network stack to resume queueing packets after we've
+ * finished transmitting at least 1/4 of the packets in the queue.
+ */
+#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
+
+#define TXQUEUESTOP_THRESHHOLD 2
+
+static struct of_device_id altera_tse_ids[];
+
+static inline u32 tse_tx_avail(struct altera_tse_private *priv)
+{
+ return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
+}
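A worked example of the free-slot arithmetic in tse_tx_avail(); the values are illustrative and the comment is not part of the patch:

/* Illustrative only: with tx_ring_size = 64, tx_cons = 4 and tx_prod = 10,
 * tse_tx_avail() returns 4 + 64 - 10 - 1 = 57. The "- 1" keeps one slot
 * permanently unused so a completely full ring can be told apart from an
 * empty one when the producer and consumer indices are compared.
 */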
+
+/* MDIO specific functions
+ */
+static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+ unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+ u32 data;
+
+ /* set MDIO address */
+ iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+ /* get the data */
+ data = ioread32(&mdio_regs[regnum]) & 0xffff;
+ return data;
+}
+
+static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+ unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+
+ /* set MDIO address */
+ iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+ /* write the data */
+ iowrite32((u32) value, &mdio_regs[regnum]);
+ return 0;
+}
+
+static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret;
+ int i;
+ struct device_node *mdio_node = NULL;
+ struct mii_bus *mdio = NULL;
+ struct device_node *child_node = NULL;
+
+ for_each_child_of_node(priv->device->of_node, child_node) {
+ if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
+ mdio_node = child_node;
+ break;
+ }
+ }
+
+ if (mdio_node) {
+ netdev_dbg(dev, "FOUND MDIO subnode\n");
+ } else {
+ netdev_dbg(dev, "NO MDIO subnode\n");
+ return 0;
+ }
+
+ mdio = mdiobus_alloc();
+ if (mdio == NULL) {
+ netdev_err(dev, "Error allocating MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ mdio->name = ALTERA_TSE_RESOURCE_NAME;
+ mdio->read = &altera_tse_mdio_read;
+ mdio->write = &altera_tse_mdio_write;
+ snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
+
+ mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (mdio->irq == NULL) {
+ ret = -ENOMEM;
+ goto out_free_mdio;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdio->irq[i] = PHY_POLL;
+
+ mdio->priv = priv->mac_dev;
+ mdio->parent = priv->device;
+
+ ret = of_mdiobus_register(mdio, mdio_node);
+ if (ret != 0) {
+ netdev_err(dev, "Cannot register MDIO bus %s\n",
+ mdio->id);
+ goto out_free_mdio_irq;
+ }
+
+ if (netif_msg_drv(priv))
+ netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
+
+ priv->mdio = mdio;
+ return 0;
+out_free_mdio_irq:
+ kfree(mdio->irq);
+out_free_mdio:
+ mdiobus_free(mdio);
+ mdio = NULL;
+ return ret;
+}
+
+static void altera_tse_mdio_destroy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ if (priv->mdio == NULL)
+ return;
+
+ if (netif_msg_drv(priv))
+ netdev_info(dev, "MDIO bus %s: removed\n",
+ priv->mdio->id);
+
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+ priv->mdio = NULL;
+}
+
+static int tse_init_rx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer, int len)
+{
+ rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
+ if (!rxbuffer->skb)
+ return -ENOMEM;
+
+ rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
+ len,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
+ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(rxbuffer->skb);
+ return -EINVAL;
+ }
+ rxbuffer->len = len;
+ return 0;
+}
+
+static void tse_free_rx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
+{
+ struct sk_buff *skb = rxbuffer->skb;
+ dma_addr_t dma_addr = rxbuffer->dma_addr;
+
+ if (skb != NULL) {
+ if (dma_addr)
+ dma_unmap_single(priv->device, dma_addr,
+ rxbuffer->len,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ rxbuffer->skb = NULL;
+ rxbuffer->dma_addr = 0;
+ }
+}
+
+/* Unmap and free Tx buffer resources
+ */
+static void tse_free_tx_buffer(struct altera_tse_private *priv,
+ struct tse_buffer *buffer)
+{
+ if (buffer->dma_addr) {
+ if (buffer->mapped_as_page)
+ dma_unmap_page(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+ buffer->dma_addr = 0;
+ }
+ if (buffer->skb) {
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+}
+
+static int alloc_init_skbufs(struct altera_tse_private *priv)
+{
+ unsigned int rx_descs = priv->rx_ring_size;
+ unsigned int tx_descs = priv->tx_ring_size;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Create Rx ring buffer */
+ priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
+ GFP_KERNEL);
+ if (!priv->rx_ring)
+ goto err_rx_ring;
+
+ /* Create Tx ring buffer */
+ priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
+ GFP_KERNEL);
+ if (!priv->tx_ring)
+ goto err_tx_ring;
+
+ priv->tx_cons = 0;
+ priv->tx_prod = 0;
+
+ /* Init Rx ring */
+ for (i = 0; i < rx_descs; i++) {
+ ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
+ priv->rx_dma_buf_sz);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ priv->rx_cons = 0;
+ priv->rx_prod = 0;
+
+ return 0;
+err_init_rx_buffers:
+ while (--i >= 0)
+ tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+ kfree(priv->tx_ring);
+err_tx_ring:
+ kfree(priv->rx_ring);
+err_rx_ring:
+ return ret;
+}
+
+static void free_skbufs(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int rx_descs = priv->rx_ring_size;
+ unsigned int tx_descs = priv->tx_ring_size;
+ int i;
+
+ /* Release the DMA TX/RX socket buffers */
+ for (i = 0; i < rx_descs; i++)
+ tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+ for (i = 0; i < tx_descs; i++)
+ tse_free_tx_buffer(priv, &priv->tx_ring[i]);
+
+
+ kfree(priv->tx_ring);
+}
+
+/* Reallocate the skb for the reception process
+ */
+static inline void tse_rx_refill(struct altera_tse_private *priv)
+{
+ unsigned int rxsize = priv->rx_ring_size;
+ unsigned int entry;
+ int ret;
+
+ for (; priv->rx_cons - priv->rx_prod > 0;
+ priv->rx_prod++) {
+ entry = priv->rx_prod % rxsize;
+ if (likely(priv->rx_ring[entry].skb == NULL)) {
+ ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
+ priv->rx_dma_buf_sz);
+ if (unlikely(ret != 0))
+ break;
+ priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
+ }
+ }
+}
+
+/* Pull out the VLAN tag and fix up the packet
+ */
+static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr;
+ u16 vid;
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ !__vlan_get_tag(skb, &vid)) {
+ eth_hdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
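The byte-level effect of tse_rx_vlan() is easiest to see as a layout sketch; the comment below is for illustration only, not driver code:

/* Illustrative layout for tse_rx_vlan():
 *
 *   before: | dst MAC (6) | src MAC (6) | 802.1Q tag (4) | type | payload |
 *   after : | dst MAC (6) | src MAC (6) | type | payload |
 *
 * memmove() slides the two MAC addresses 4 bytes toward the payload so they
 * overwrite the tag, skb_pull() then advances skb->data past the 4 stale
 * leading bytes, and the extracted VID is handed to the stack with
 * __vlan_hwaccel_put_tag().
 */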
+
+/* Receive a packet: retrieve and pass over to upper levels
+ */
+static int tse_rx(struct altera_tse_private *priv, int limit)
+{
+ unsigned int count = 0;
+ unsigned int next_entry;
+ struct sk_buff *skb;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
+ u32 rxstatus;
+ u16 pktlength;
+ u16 pktstatus;
+
+ while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ pktstatus = rxstatus >> 16;
+ pktlength = rxstatus & 0xffff;
+
+ if ((pktstatus & 0xFF) || (pktlength == 0))
+ netdev_err(priv->dev,
+ "RCV pktstatus %08X pktlength %08X\n",
+ pktstatus, pktlength);
+
+ count++;
+ next_entry = (++priv->rx_cons) % priv->rx_ring_size;
+
+ skb = priv->rx_ring[entry].skb;
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx descriptor chain\n",
+ __func__);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ priv->rx_ring[entry].skb = NULL;
+
+ skb_put(skb, pktlength);
+
+ /* make cache consistent with receive packet buffer */
+ dma_sync_single_for_cpu(priv->device,
+ priv->rx_ring[entry].dma_addr,
+ priv->rx_ring[entry].len,
+ DMA_FROM_DEVICE);
+
+ dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
+ priv->rx_ring[entry].len, DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(priv)) {
+ netdev_info(priv->dev, "frame received %d bytes\n",
+ pktlength);
+ print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
+ 16, 1, skb->data, pktlength, true);
+ }
+
+ tse_rx_vlan(priv->dev, skb);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb_checksum_none_assert(skb);
+
+ napi_gro_receive(&priv->napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += pktlength;
+
+ entry = next_entry;
+ }
+
+ tse_rx_refill(priv);
+ return count;
+}
+
+/* Reclaim resources after transmission completes
+ */
+static int tse_tx_complete(struct altera_tse_private *priv)
+{
+ unsigned int txsize = priv->tx_ring_size;
+ u32 ready;
+ unsigned int entry;
+ struct tse_buffer *tx_buff;
+ int txcomplete = 0;
+
+ spin_lock(&priv->tx_lock);
+
+ ready = priv->dmaops->tx_completions(priv);
+
+ /* Free sent buffers */
+ while (ready && (priv->tx_cons != priv->tx_prod)) {
+ entry = priv->tx_cons % txsize;
+ tx_buff = &priv->tx_ring[entry];
+
+ if (netif_msg_tx_done(priv))
+ netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
+ __func__, priv->tx_prod, priv->tx_cons);
+
+ if (likely(tx_buff->skb))
+ priv->dev->stats.tx_packets++;
+
+ tse_free_tx_buffer(priv, tx_buff);
+ priv->tx_cons++;
+
+ txcomplete++;
+ ready--;
+ }
+
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
+ if (netif_msg_tx_done(priv))
+ netdev_dbg(priv->dev, "%s: restart transmit\n",
+ __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+
+ spin_unlock(&priv->tx_lock);
+ return txcomplete;
+}
+
+/* NAPI polling function
+ */
+static int tse_poll(struct napi_struct *napi, int budget)
+{
+ struct altera_tse_private *priv =
+ container_of(napi, struct altera_tse_private, napi);
+ int rxcomplete = 0;
+ int txcomplete = 0;
+ unsigned long int flags;
+
+ txcomplete = tse_tx_complete(priv);
+
+ rxcomplete = tse_rx(priv, budget);
+
+ if (rxcomplete >= budget || txcomplete > 0)
+ return rxcomplete;
+
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
+
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ txcomplete+rxcomplete, budget);
+
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ return rxcomplete + txcomplete;
+}
+
+/* DMA TX & RX FIFO interrupt service routine
+ */
+static irqreturn_t altera_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct altera_tse_private *priv;
+ unsigned long int flags;
+
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ priv = netdev_priv(dev);
+
+ /* turn off desc irqs and enable napi rx */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ priv->dmaops->disable_rxirq(priv);
+ priv->dmaops->disable_txirq(priv);
+ __napi_schedule(&priv->napi);
+ }
+
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* Transmit a packet (called by the kernel). Dispatches to either
+ * the SGDMA or the MSGDMA transmit method. Assumes no scatter/gather
+ * support, i.e. there is only one physically contiguous fragment,
+ * starting at skb->data and of length skb_headlen(skb).
+ */
+static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int txsize = priv->tx_ring_size;
+ unsigned int entry;
+ struct tse_buffer *buffer = NULL;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int nopaged_len = skb_headlen(skb);
+ enum netdev_tx ret = NETDEV_TX_OK;
+ dma_addr_t dma_addr;
+ int txcomplete = 0;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx list full when queue awake\n",
+ __func__);
+ }
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Map the first skb fragment */
+ entry = priv->tx_prod % txsize;
+ buffer = &priv->tx_ring[entry];
+
+ dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr)) {
+ netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ buffer->skb = skb;
+ buffer->dma_addr = dma_addr;
+ buffer->len = nopaged_len;
+
+ /* Push data out of the cache hierarchy into main memory */
+ dma_sync_single_for_device(priv->device, buffer->dma_addr,
+ buffer->len, DMA_TO_DEVICE);
+
+ txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+
+ skb_tx_timestamp(skb);
+
+ priv->tx_prod++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
+ if (netif_msg_hw(priv))
+ netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_stop_queue(dev);
+ }
+
+out:
+ spin_unlock_bh(&priv->tx_lock);
+
+ return ret;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void altera_tse_adjust_link(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ /* only change config if there is a link */
+ spin_lock(&priv->mac_cfg_lock);
+ if (phydev->link) {
+ /* Read old config */
+ u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
+
+ /* Check duplex */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ cfg_reg |= MAC_CMDCFG_HD_ENA;
+ else
+ cfg_reg &= ~MAC_CMDCFG_HD_ENA;
+
+ netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
+ dev->name, phydev->duplex);
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ /* Check speed */
+ if (phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ cfg_reg |= MAC_CMDCFG_ETH_SPEED;
+ cfg_reg &= ~MAC_CMDCFG_ENA_10;
+ break;
+ case 100:
+ cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+ cfg_reg &= ~MAC_CMDCFG_ENA_10;
+ break;
+ case 10:
+ cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+ cfg_reg |= MAC_CMDCFG_ENA_10;
+ break;
+ default:
+ if (netif_msg_link(priv))
+ netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+ priv->oldspeed = phydev->speed;
+ }
+ iowrite32(cfg_reg, &priv->mac_dev->command_config);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+static struct phy_device *connect_local_phy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+ int ret;
+
+ if (priv->phy_addr != POLL_PHY) {
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
+
+ phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
+ priv->phy_iface);
+ if (IS_ERR(phydev))
+ netdev_err(dev, "Could not attach to PHY\n");
+
+ } else {
+ phydev = phy_find_first(priv->mdio);
+ if (phydev == NULL) {
+ netdev_err(dev, "No PHY found\n");
+ return phydev;
+ }
+
+ ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
+ priv->phy_iface);
+ if (ret != 0) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ phydev = NULL;
+ }
+ }
+ return phydev;
+}
+
+/* Initialize driver's PHY state, and attach to the PHY
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ struct device_node *phynode;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
+
+ if (!phynode) {
+ netdev_dbg(dev, "no phy-handle found\n");
+ if (!priv->mdio) {
+ netdev_err(dev,
+ "No phy-handle nor local mdio specified\n");
+ return -ENODEV;
+ }
+ phydev = connect_local_phy(dev);
+ } else {
+ netdev_dbg(dev, "phy-handle found\n");
+ phydev = of_phy_connect(dev, phynode,
+ &altera_tse_adjust_link, 0, priv->phy_iface);
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "Could not find the PHY\n");
+ return -ENODEV;
+ }
+
+ /* Stop Advertising 1000BASE Capability if interface is not GMII
+ * Note: Checkpatch throws CHECKs for the camel case defines below,
+ * it's ok to ignore.
+ */
+ if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
+ (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ /* Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+
+ netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
+ phydev->addr, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+ return 0;
+}
+
+static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u32 msb;
+ u32 lsb;
+
+ msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
+
+ /* Set primary MAC address */
+ iowrite32(msb, &mac->mac_addr_0);
+ iowrite32(lsb, &mac->mac_addr_1);
+}
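
    As an aside, a tiny stand-alone sketch (not part of the patch) of how the
    six MAC octets land in the two 32-bit address registers written above; the
    sample address is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t addr[6] = { 0x00, 0x07, 0xed, 0x12, 0x34, 0x56 };

            /* Octets 0..3 little-endian into the first register,
             * octets 4..5 into the low half of the second.
             */
            uint32_t msb = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
                           (addr[1] << 8) | addr[0];
            uint32_t lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

            printf("mac_addr_0 = 0x%08x\n", (unsigned int)msb); /* 0x12ed0700 */
            printf("mac_addr_1 = 0x%08x\n", (unsigned int)lsb); /* 0x00005634 */
            return 0;
    }
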
+
+/* MAC software reset.
+ * When reset is triggered, the MAC function completes the current
+ * transmission or reception, and subsequently disables the transmit and
+ * receive logic, flushes the receive FIFO buffer, and resets the statistics
+ * counters.
+ */
+static int reset_mac(struct altera_tse_private *priv)
+{
+ void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
+ int counter;
+ u32 dat;
+
+ dat = ioread32(cmd_cfg_reg);
+ dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+ dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
+ iowrite32(dat, cmd_cfg_reg);
+
+ counter = 0;
+ while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+ break;
+ udelay(1);
+ }
+
+ if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+ dat = ioread32(cmd_cfg_reg);
+ dat &= ~MAC_CMDCFG_SW_RESET;
+ iowrite32(dat, cmd_cfg_reg);
+ return -1;
+ }
+ return 0;
+}
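
    As an aside, reset_mac() above is the common bounded busy-wait on a
    self-clearing bit. A stand-alone sketch of the same shape (not part of the
    patch), with the register read faked so it can run in user space:

    #include <stdint.h>
    #include <stdio.h>

    #define SW_RESET_BIT  (1u << 13)   /* stand-in for MAC_CMDCFG_SW_RESET */
    #define WATCHDOG_CNTR 10000        /* stand-in for the driver's limit */

    /* Pretend hardware: the reset bit self-clears after a few reads */
    static uint32_t fake_reg = SW_RESET_BIT;
    static int reads_until_clear = 5;

    static uint32_t read_reg(void)
    {
            if (reads_until_clear-- <= 0)
                    fake_reg &= ~SW_RESET_BIT;
            return fake_reg;
    }

    int main(void)
    {
            int counter = 0;

            while (counter++ < WATCHDOG_CNTR) {
                    if (!(read_reg() & SW_RESET_BIT))
                            break;
                    /* the driver udelay(1)s between polls */
            }

            if (counter >= WATCHDOG_CNTR) {
                    printf("reset timed out\n");
                    return 1;
            }
            printf("reset completed after %d reads\n", counter);
            return 0;
    }
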
+
+/* Initialize MAC core registers
+ */
+static int init_mac(struct altera_tse_private *priv)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ unsigned int cmd = 0;
+ u32 frm_length;
+
+ /* Setup Rx FIFO */
+ iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+ &mac->rx_section_empty);
+ iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
+ iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
+ iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+
+ /* Setup Tx FIFO */
+ iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+ &mac->tx_section_empty);
+ iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
+ iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
+ iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+
+ /* MAC Address Configuration */
+ tse_update_mac_addr(priv, priv->dev->dev_addr);
+
+ /* MAC Function Configuration */
+ frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
+ iowrite32(frm_length, &mac->frm_length);
+ iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+
+ /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
+ * start address
+ */
+ tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+ tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+ ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+
+ /* Set the MAC options */
+ cmd = ioread32(&mac->command_config);
+ cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */
+ cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
+ cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
+ * with CRC errors
+ */
+ cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
+ cmd &= ~MAC_CMDCFG_TX_ENA;
+ cmd &= ~MAC_CMDCFG_RX_ENA;
+ iowrite32(cmd, &mac->command_config);
+
+ if (netif_msg_hw(priv))
+ dev_dbg(priv->device,
+ "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
+
+ return 0;
+}
+
+/* Start/stop MAC transmit and receive logic
+ */
+static void tse_set_mac(struct altera_tse_private *priv, bool enable)
+{
+ struct altera_tse_mac *mac = priv->mac_dev;
+ u32 value = ioread32(&mac->command_config);
+
+ if (enable)
+ value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
+ else
+ value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+
+ iowrite32(value, &mac->command_config);
+}
+
+/* Change the MTU
+ */
+static int tse_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int max_mtu = priv->max_mtu;
+ unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
+
+ if (netif_running(dev)) {
+ netdev_err(dev, "must be stopped to change its MTU\n");
+ return -EBUSY;
+ }
+
+ if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
+ netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static void altera_tse_set_mcfilter(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ /* clear the hash filter */
+ for (i = 0; i < 64; i++)
+ iowrite32(0, &(mac->hash_table[i]));
+
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int hash = 0;
+ int mac_octet;
+
+ for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
+ unsigned char xor_bit = 0;
+ unsigned char octet = ha->addr[mac_octet];
+ unsigned int bitshift;
+
+ for (bitshift = 0; bitshift < 8; bitshift++)
+ xor_bit ^= ((octet >> bitshift) & 0x01);
+
+ hash = (hash << 1) | xor_bit;
+ }
+ iowrite32(1, &(mac->hash_table[hash]));
+ }
+}
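
    As an aside, a stand-alone sketch (not part of the patch) of the 6-bit hash
    computed above: each octet of the multicast address is XOR-folded down to
    one parity bit, and the six parity bits pick one of the 64 hash_table
    entries. The helper name tse_mc_hash() and the sample address are for
    illustration only:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int tse_mc_hash(const uint8_t addr[6])
    {
            unsigned int hash = 0;
            int octet, bit;

            /* Walk from the last octet to the first, exactly like the
             * driver loop above, folding each octet to its parity.
             */
            for (octet = 5; octet >= 0; octet--) {
                    unsigned char xor_bit = 0;

                    for (bit = 0; bit < 8; bit++)
                            xor_bit ^= (addr[octet] >> bit) & 0x01;

                    hash = (hash << 1) | xor_bit;
            }
            return hash;    /* 0..63: one bit per hash_table[] entry */
    }

    int main(void)
    {
            /* MAC for the IPv4 all-hosts multicast group 224.0.0.1 */
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            printf("hash_table index = %u\n", tse_mc_hash(mc));
            return 0;
    }
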
+
+
+static void altera_tse_set_mcfilterall(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+ int i;
+
+ /* set the hash filter */
+ for (i = 0; i < 64; i++)
+ iowrite32(1, &(mac->hash_table[i]));
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode_hashfilter(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+
+ spin_lock(&priv->mac_cfg_lock);
+
+ if (dev->flags & IFF_PROMISC)
+ tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+ if (dev->flags & IFF_ALLMULTI)
+ altera_tse_set_mcfilterall(dev);
+ else
+ altera_tse_set_mcfilter(dev);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ struct altera_tse_mac *mac = priv->mac_dev;
+
+ spin_lock(&priv->mac_cfg_lock);
+
+ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
+ tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ else
+ tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Open and initialize the interface
+ */
+static int tse_open(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret = 0;
+ int i;
+ unsigned long int flags;
+
+ /* Reset and configure TSE MAC and probe associated PHY */
+ ret = priv->dmaops->init_dma(priv);
+ if (ret != 0) {
+ netdev_err(dev, "Cannot initialize DMA\n");
+ goto phy_error;
+ }
+
+ if (netif_msg_ifup(priv))
+ netdev_warn(dev, "device MAC address %pM\n",
+ dev->dev_addr);
+
+ if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
+ netdev_warn(dev, "TSE revision %x\n", priv->revision);
+
+ spin_lock(&priv->mac_cfg_lock);
+ ret = reset_mac(priv);
+ if (ret)
+ netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+
+ ret = init_mac(priv);
+ spin_unlock(&priv->mac_cfg_lock);
+ if (ret) {
+ netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
+ goto alloc_skbuf_error;
+ }
+
+ priv->dmaops->reset_dma(priv);
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->rx_ring_size = dma_rx_num;
+ priv->tx_ring_size = dma_tx_num;
+ ret = alloc_init_skbufs(priv);
+ if (ret) {
+ netdev_err(dev, "DMA descriptors initialization failed\n");
+ goto alloc_skbuf_error;
+ }
+
+
+ /* Register RX interrupt */
+ ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "Unable to register RX interrupt %d\n",
+ priv->rx_irq);
+ goto init_error;
+ }
+
+ /* Register TX interrupt */
+ ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "Unable to register TX interrupt %d\n",
+ priv->tx_irq);
+ goto tx_request_irq_error;
+ }
+
+ /* Enable DMA interrupts */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+
+ /* Setup RX descriptor chain */
+ for (i = 0; i < priv->rx_ring_size; i++)
+ priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
+
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ /* Start MAC Rx/Tx */
+ spin_lock(&priv->mac_cfg_lock);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+tx_request_irq_error:
+ free_irq(priv->rx_irq, dev);
+init_error:
+ free_skbufs(dev);
+alloc_skbuf_error:
+ if (priv->phydev) {
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+phy_error:
+ return ret;
+}
+
+/* Stop TSE MAC interface and put the device in an inactive state
+ */
+static int tse_shutdown(struct net_device *dev)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+ int ret;
+ unsigned long int flags;
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ /* Disable DMA interrupts */
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->disable_rxirq(priv);
+ priv->dmaops->disable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+ /* Free the IRQ lines */
+ free_irq(priv->rx_irq, dev);
+ free_irq(priv->tx_irq, dev);
+
+ /* disable and reset the MAC, empties fifo */
+ spin_lock(&priv->mac_cfg_lock);
+ spin_lock(&priv->tx_lock);
+
+ ret = reset_mac(priv);
+ if (ret)
+ netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+ priv->dmaops->reset_dma(priv);
+ free_skbufs(dev);
+
+ spin_unlock(&priv->tx_lock);
+ spin_unlock(&priv->mac_cfg_lock);
+
+ priv->dmaops->uninit_dma(priv);
+
+ return 0;
+}
+
+static struct net_device_ops altera_tse_netdev_ops = {
+ .ndo_open = tse_open,
+ .ndo_stop = tse_shutdown,
+ .ndo_start_xmit = tse_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = tse_set_rx_mode,
+ .ndo_change_mtu = tse_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+ struct resource **res, void __iomem **ptr)
+{
+ struct resource *region;
+ struct device *device = &pdev->dev;
+
+ *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (*res == NULL) {
+ dev_err(device, "resource %s not defined\n", name);
+ return -ENODEV;
+ }
+
+ region = devm_request_mem_region(device, (*res)->start,
+ resource_size(*res), dev_name(device));
+ if (region == NULL) {
+ dev_err(device, "unable to request %s\n", name);
+ return -EBUSY;
+ }
+
+ *ptr = devm_ioremap_nocache(device, region->start,
+ resource_size(region));
+ if (*ptr == NULL) {
+ dev_err(device, "ioremap_nocache of %s failed!", name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Probe Altera TSE MAC device
+ */
+static int altera_tse_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ int ret = -ENODEV;
+ struct resource *control_port;
+ struct resource *dma_res;
+ struct altera_tse_private *priv;
+ const unsigned char *macaddr;
+ struct device_node *np = pdev->dev.of_node;
+ void __iomem *descmap;
+ const struct of_device_id *of_id = NULL;
+
+ ndev = alloc_etherdev(sizeof(struct altera_tse_private));
+ if (!ndev) {
+ dev_err(&pdev->dev, "Could not allocate network device\n");
+ return -ENODEV;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ of_id = of_match_device(altera_tse_ids, &pdev->dev);
+
+ if (of_id)
+ priv->dmaops = (struct altera_dmaops *)of_id->data;
+
+
+ if (priv->dmaops &&
+ priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
+ /* Get the mapped address to the SGDMA descriptor memory */
+ ret = request_and_map(pdev, "s1", &dma_res, &descmap);
+ if (ret)
+ goto out_free;
+
+ /* Start of that memory is for transmit descriptors */
+ priv->tx_dma_desc = descmap;
+
+ /* First half is for tx descriptors, the other half for rx */
+ priv->txdescmem = resource_size(dma_res)/2;
+
+ priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
+
+ priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
+ priv->txdescmem));
+ priv->rxdescmem = resource_size(dma_res)/2;
+ priv->rxdescmem_busaddr = dma_res->start;
+ priv->rxdescmem_busaddr += priv->txdescmem;
+
+ if (upper_32_bits(priv->rxdescmem_busaddr)) {
+ dev_dbg(priv->device,
+ "SGDMA bus addresses greater than 32-bits\n");
+ goto out_free;
+ }
+ if (upper_32_bits(priv->txdescmem_busaddr)) {
+ dev_dbg(priv->device,
+ "SGDMA bus addresses greater than 32-bits\n");
+ goto out_free;
+ }
+ } else if (priv->dmaops &&
+ priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
+ ret = request_and_map(pdev, "rx_resp", &dma_res,
+ &priv->rx_dma_resp);
+ if (ret)
+ goto out_free;
+
+ ret = request_and_map(pdev, "tx_desc", &dma_res,
+ &priv->tx_dma_desc);
+ if (ret)
+ goto out_free;
+
+ priv->txdescmem = resource_size(dma_res);
+ priv->txdescmem_busaddr = dma_res->start;
+
+ ret = request_and_map(pdev, "rx_desc", &dma_res,
+ &priv->rx_dma_desc);
+ if (ret)
+ goto out_free;
+
+ priv->rxdescmem = resource_size(dma_res);
+ priv->rxdescmem_busaddr = dma_res->start;
+
+ } else {
+ goto out_free;
+ }
+
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ dma_set_coherent_mask(priv->device,
+ DMA_BIT_MASK(priv->dmaops->dmamask));
+ else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+ else
+ goto out_free;
+
+ /* MAC address space */
+ ret = request_and_map(pdev, "control_port", &control_port,
+ (void __iomem **)&priv->mac_dev);
+ if (ret)
+ goto out_free;
+
+ /* xSGDMA Rx Dispatcher address space */
+ ret = request_and_map(pdev, "rx_csr", &dma_res,
+ &priv->rx_dma_csr);
+ if (ret)
+ goto out_free;
+
+
+ /* xSGDMA Tx Dispatcher address space */
+ ret = request_and_map(pdev, "tx_csr", &dma_res,
+ &priv->tx_dma_csr);
+ if (ret)
+ goto out_free;
+
+
+ /* Rx IRQ */
+ priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
+ if (priv->rx_irq == -ENXIO) {
+ dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* Tx IRQ */
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
+ if (priv->tx_irq == -ENXIO) {
+ dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* get FIFO depths from device tree */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+ &priv->rx_fifo_depth)) {
+ dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+ &priv->tx_fifo_depth)) {
+ dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ /* get hash filter settings for this instance */
+ priv->hash_filter =
+ of_property_read_bool(pdev->dev.of_node,
+ "altr,has-hash-multicast-filter");
+
+ /* get supplemental address settings for this instance */
+ priv->added_unicast =
+ of_property_read_bool(pdev->dev.of_node,
+ "altr,has-supplementary-unicast");
+
+ /* Max MTU is 1500, ETH_DATA_LEN */
+ priv->max_mtu = ETH_DATA_LEN;
+
+ /* Get the max mtu from the device tree. Note that the
+ * "max-frame-size" parameter is actually max mtu. Definition
+ * in the ePAPR v1.1 spec and usage differ, so go with usage.
+ */
+ of_property_read_u32(pdev->dev.of_node, "max-frame-size",
+ &priv->max_mtu);
+
+ /* The DMA buffer size already accounts for an alignment bias
+ * to avoid unaligned access exceptions for the NIOS processor.
+ */
+ priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
+
+ /* get default MAC address from device tree */
+ macaddr = of_get_mac_address(pdev->dev.of_node);
+ if (macaddr)
+ ether_addr_copy(ndev->dev_addr, macaddr);
+ else
+ eth_hw_addr_random(ndev);
+
+ priv->phy_iface = of_get_phy_mode(np);
+
+ /* try to get PHY address from device tree, use PHY autodetection if
+ * no valid address is given
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
+ &priv->phy_addr)) {
+ priv->phy_addr = POLL_PHY;
+ }
+
+ if (!((priv->phy_addr == POLL_PHY) ||
+ ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
+ dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
+ priv->phy_addr);
+ goto out_free;
+ }
+
+ /* Create/attach to MDIO bus */
+ ret = altera_tse_mdio_create(ndev,
+ atomic_add_return(1, &instance_count));
+
+ if (ret)
+ goto out_free;
+
+ /* initialize netdev */
+ ether_setup(ndev);
+ ndev->mem_start = control_port->start;
+ ndev->mem_end = control_port->end;
+ ndev->netdev_ops = &altera_tse_netdev_ops;
+ altera_tse_set_ethtool_ops(ndev);
+
+ altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
+
+ if (priv->hash_filter)
+ altera_tse_netdev_ops.ndo_set_rx_mode =
+ tse_set_rx_mode_hashfilter;
+
+ /* Scatter/gather IO is not supported,
+ * so it is turned off
+ */
+ ndev->hw_features &= ~NETIF_F_SG;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+
+ /* VLAN offloading of tagging, stripping and filtering is not
+ * supported by hardware, but the driver accommodates the extra
+ * 4-byte VLAN tag so upper layers can process it.
+ */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ /* setup NAPI interface */
+ netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+
+ spin_lock_init(&priv->mac_cfg_lock);
+ spin_lock_init(&priv->tx_lock);
+ spin_lock_init(&priv->rxdma_irq_lock);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register TSE net device\n");
+ goto out_free_mdio;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv->revision = ioread32(&priv->mac_dev->megacore_revision);
+
+ if (netif_msg_probe(priv))
+ dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
+ (priv->revision >> 8) & 0xff,
+ priv->revision & 0xff,
+ (unsigned long) control_port->start, priv->rx_irq,
+ priv->tx_irq);
+
+ ret = init_phy(ndev);
+ if (ret != 0) {
+ netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ goto out_free_mdio;
+ }
+ return 0;
+
+out_free_mdio:
+ altera_tse_mdio_destroy(ndev);
+out_free:
+ free_netdev(ndev);
+ return ret;
+}
+
+/* Remove Altera TSE MAC device
+ */
+static int altera_tse_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ altera_tse_mdio_destroy(ndev);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+struct altera_dmaops altera_dtype_sgdma = {
+ .altera_dtype = ALTERA_DTYPE_SGDMA,
+ .dmamask = 32,
+ .reset_dma = sgdma_reset,
+ .enable_txirq = sgdma_enable_txirq,
+ .enable_rxirq = sgdma_enable_rxirq,
+ .disable_txirq = sgdma_disable_txirq,
+ .disable_rxirq = sgdma_disable_rxirq,
+ .clear_txirq = sgdma_clear_txirq,
+ .clear_rxirq = sgdma_clear_rxirq,
+ .tx_buffer = sgdma_tx_buffer,
+ .tx_completions = sgdma_tx_completions,
+ .add_rx_desc = sgdma_add_rx_desc,
+ .get_rx_status = sgdma_rx_status,
+ .init_dma = sgdma_initialize,
+ .uninit_dma = sgdma_uninitialize,
+};
+
+struct altera_dmaops altera_dtype_msgdma = {
+ .altera_dtype = ALTERA_DTYPE_MSGDMA,
+ .dmamask = 64,
+ .reset_dma = msgdma_reset,
+ .enable_txirq = msgdma_enable_txirq,
+ .enable_rxirq = msgdma_enable_rxirq,
+ .disable_txirq = msgdma_disable_txirq,
+ .disable_rxirq = msgdma_disable_rxirq,
+ .clear_txirq = msgdma_clear_txirq,
+ .clear_rxirq = msgdma_clear_rxirq,
+ .tx_buffer = msgdma_tx_buffer,
+ .tx_completions = msgdma_tx_completions,
+ .add_rx_desc = msgdma_add_rx_desc,
+ .get_rx_status = msgdma_rx_status,
+ .init_dma = msgdma_initialize,
+ .uninit_dma = msgdma_uninitialize,
+};
+
+static struct of_device_id altera_tse_ids[] = {
+ { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
+ { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
+ { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_tse_ids);
+
+static struct platform_driver altera_tse_driver = {
+ .probe = altera_tse_probe,
+ .remove = altera_tse_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .driver = {
+ .name = ALTERA_TSE_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = altera_tse_ids,
+ },
+};
+
+module_platform_driver(altera_tse_driver);
+
+MODULE_AUTHOR("Altera Corporation");
+MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
new file mode 100644
index 000000000000..70fa13f486b2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -0,0 +1,44 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "altera_tse.h"
+#include "altera_utils.h"
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ value |= bit_mask;
+ iowrite32(value, ioaddr);
+}
+
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ value &= ~bit_mask;
+ iowrite32(value, ioaddr);
+}
+
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ return (value & bit_mask) ? 1 : 0;
+}
+
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+{
+ u32 value = ioread32(ioaddr);
+ return (value & bit_mask) ? 0 : 1;
+}
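
    As an aside, a stand-alone sketch (not part of the patch) of the same
    read-modify-write pattern on a plain variable instead of a __iomem
    register; the bit positions are stand-ins, not the real MAC_CMDCFG values:

    #include <stdint.h>
    #include <stdio.h>

    static void set_bits(uint32_t *reg, uint32_t mask)
    {
            *reg |= mask;
    }

    static void clear_bits(uint32_t *reg, uint32_t mask)
    {
            *reg &= ~mask;
    }

    static int bits_set(uint32_t reg, uint32_t mask)
    {
            return (reg & mask) ? 1 : 0;
    }

    int main(void)
    {
            uint32_t cmdcfg = 0;
            const uint32_t tx_ena = 1u << 0;  /* stand-in for TX_ENA */
            const uint32_t rx_ena = 1u << 1;  /* stand-in for RX_ENA */

            set_bits(&cmdcfg, tx_ena | rx_ena);
            clear_bits(&cmdcfg, tx_ena);

            printf("cmdcfg=0x%08x tx=%d rx=%d\n", (unsigned int)cmdcfg,
                   bits_set(cmdcfg, tx_ena), bits_set(cmdcfg, rx_ena));
            return 0;
    }
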
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
new file mode 100644
index 000000000000..ce1db36d3583
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -0,0 +1,27 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#ifndef __ALTERA_UTILS_H__
+#define __ALTERA_UTILS_H__
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+
+#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 18e542f7853d..98a10d555b79 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
spin_lock_irqsave(&lp->devlock, flags);
if (TX_BUFFS_AVAIL)
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 9793767996a2..87e727b921dc 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
netif_stop_queue(dev);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 2061b471fd16..26efaaa5e73f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
int rx_pkt_limit = budget;
unsigned long flags;
+ if (rx_pkt_limit <= 0)
+ goto rx_not_empty;
+
do{
/* process receive packets until we use the quota*/
/* If we own the next entry, it's a new packet. Send it up. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..e7cc9174e364 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
+ unsigned int entries = 1 << size;
new_rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ entries,
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
- memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
- new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_rx_ring;
- new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
/* first copy the current receive buffers */
- overlap = min(size, lp->rx_ring_size);
+ overlap = min(entries, lp->rx_ring_size);
for (new = 0; new < overlap; new++) {
new_rx_ring[new] = lp->rx_ring[new];
new_dma_addr_list[new] = lp->rx_dma_addr[new];
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++) {
+ for (; new < entries; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[new]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_ring_size, lp->rx_ring,
lp->rx_ring_dma_addr);
- lp->rx_ring_size = (1 << size);
+ lp->rx_ring_size = entries;
lp->rx_mod_mask = lp->rx_ring_size - 1;
lp->rx_len_bits = (size << 4);
lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new:
while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
- pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new]))
+ pci_unmap_single(lp->pci_dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
@@ -634,8 +649,7 @@ free_new_lists:
kfree(new_dma_addr_list);
free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ sizeof(struct pcnet32_rx_head) * entries,
new_rx_ring,
new_ring_dma_addr);
}
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ /*
+ * map the new buffer, if mapping fails, drop the packet and
+ * reuse the old buffer
+ */
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev,
- newskb->data,
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
+ new_dma_addr = pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
} else
skb = NULL;
} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->tx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]);
}
lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
}
rmb();
- if (lp->rx_dma_addr[i] == 0)
+ if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_full = 1;
netif_stop_queue(dev);
}
+drop_packet:
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 380d24922049..17bb9ce96260 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -535,7 +535,7 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
if (!alx->descmem.virt)
goto out_free;
- alx->txq.tpd = (void *)alx->descmem.virt;
+ alx->txq.tpd = alx->descmem.virt;
alx->txq.tpd_dma = alx->descmem.dma;
/* alignment requirement for next block */
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
drop:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4d3258dd0a88..e11bf18fbbd1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
- struct atl1c_buffer *buffer_info, int in_irq)
+ struct atl1c_buffer *buffer_info)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
pci_unmap_page(pdev, buffer_info->dma,
buffer_info->length, pci_driection);
}
- if (buffer_info->skb) {
- if (in_irq)
- dev_kfree_skb_irq(buffer_info->skb);
- else
- dev_kfree_skb(buffer_info->skb);
- }
+ if (buffer_info->skb)
+ dev_consume_skb_any(buffer_info->skb);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(pdev, buffer_info, 0);
+ atl1c_clean_buffer(pdev, buffer_info);
}
/* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
- atl1c_clean_buffer(pdev, buffer_info, 0);
+ atl1c_clean_buffer(pdev, buffer_info);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
- atl1c_clean_buffer(pdev, buffer_info, 1);
+ atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1977,17 +1973,17 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
struct pci_dev *pdev = adapter->pdev;
+ unsigned short offload_type;
u8 hdr_len;
u32 real_len;
- unsigned short offload_type;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+ atl1c_clean_buffer(adpt->pdev, buffer_info);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 422aab27ea1b..4345332533ad 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1641,17 +1641,17 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
+ unsigned short offload_type;
u8 hdr_len;
u32 real_len;
- unsigned short offload_type;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 287272dd69da..dfd0e91fa726 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2118,18 +2118,17 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
}
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+ struct tx_packet_desc *ptpd)
{
u8 hdr_len, ip_off;
u32 real_len;
- int err;
if (skb_shinfo(skb)->gso_size) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (unlikely(err))
- return -1;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -2175,7 +2174,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
return 3;
}
}
- return false;
+ return 0;
}
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 265ce1b752ed..78befb522a52 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2";
static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const struct ethtool_ops atl2_ethtool_ops;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
-static void atl2_set_ethtool_ops(struct net_device *netdev);
-
static void atl2_check_options(struct atl2_adapter *adapter);
/**
@@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
- atl2_set_ethtool_ops(netdev);
+ SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
.set_eeprom = atl2_set_eeprom,
};
-static void atl2_set_ethtool_ops(struct net_device *netdev)
-{
- SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
-}
-
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
(((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9fd0a71..85dbddd03722 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
+config BCMGENET
+ tristate "Broadcom GENET internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if BCMGENET=y
+ select BCM7XXX_PHY
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset.
+
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a3fb88..fd639a0d4c7d 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8a7bf7dad898..05ba62589017 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&hwstat->syncp);
/* Convert HW stats into rtnl_link_stats64 stats. */
nstat->rx_packets = hwstat->rx_pkts;
@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
- } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
return nstat;
}
@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
do {
data_src = &hwstat->tx_good_octets;
data_dst = data;
- start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&hwstat->syncp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data_dst++ = *data_src++;
- } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b9a5fb6400d3..a7d11f5565d6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = {
.ndo_set_rx_mode = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bcm_enet_netpoll,
-#endif
};
/*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6c9e1c9bdeb8..a8efb18e42fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = BNX2_NEXT_TX_BD(sw_cons);
tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_pkt++;
if (tx_pkt == budget)
break;
@@ -3133,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ if (budget <= 0)
+ return rx_pkt;
+
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
sw_prod = rxr->rx_prod;
@@ -6235,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
- int i, total_vecs, rc;
+ int i, total_vecs;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
struct net_device *dev = bp->dev;
const int len = sizeof(bp->irq_tbl[0].name);
@@ -6258,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
#ifdef BCM_CNIC
total_vecs++;
#endif
- rc = -ENOSPC;
- while (total_vecs >= BNX2_MIN_MSIX_VEC) {
- rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
- if (rc <= 0)
- break;
- if (rc > 0)
- total_vecs = rc;
- }
-
- if (rc != 0)
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+ BNX2_MIN_MSIX_VEC, total_vecs);
+ if (total_vecs < 0)
return;
msix_vecs = total_vecs;
@@ -6640,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -6733,7 +6729,7 @@ dma_error:
PCI_DMA_TODEVICE);
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29ef6d2e..4d8f8aba0ea5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
#define DP_CONT(__mask, fmt, ...) \
@@ -1146,10 +1155,6 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
#define BNX2X_MAX_NUM_OF_VFS 64
#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
@@ -1261,6 +1266,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
@@ -1403,6 +1409,12 @@ enum {
BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
+};
+
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
};
struct bnx2x_prev_path_list {
@@ -1603,6 +1615,8 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
@@ -1693,6 +1707,10 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
/* Total number of FW statistics requests */
u8 fw_stats_num;
@@ -1882,6 +1900,9 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
/* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@@ -2525,6 +2546,8 @@ enum {
void bnx2x_set_local_cmng(struct bnx2x *bp);
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dbcff509dc3f..9261d5313b5b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
- return bnx2x_num_queues ?
- min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (reset_devices)
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
}
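
Two idioms above are easy to misread: the GNU "x ? : y" extension evaluates to x when x is non-zero and to y otherwise (so a bnx2x_num_queues module parameter of 0 falls back to the RSS default), and clamp() bounds the result to [1, BNX2X_MAX_QUEUES(bp)], replacing the earlier min_t() calls. The same logic written long-hand, as a hedged sketch with the driver headers assumed:

static int example_calc_num_queues(struct bnx2x *bp)
{
	int nq;

	if (bnx2x_num_queues)
		nq = bnx2x_num_queues;	/* explicit user override */
	else
		nq = netif_get_num_default_rss_queues();

	if (reset_devices)		/* kdump kernel: keep it small */
		nq = 1;

	return clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
}
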
/**
@@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(bp->panic))
return 0;
#endif
+ if (budget <= 0)
+ return rx_pkt;
bd_cons = fp->rx_bd_cons;
bd_prod = fp->rx_bd_prod;
@@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = msix_vec - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+ /* how less vectors we will have? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
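
When pci_enable_msix_range() grants fewer vectors than requested but at least BNX2X_MIN_MSIX_VEC_CNT(bp), the driver no longer re-issues the request; it only has to shrink its queue bookkeeping to match the grant, as the rc < msix_vec branch above does. A hedged sketch of that bookkeeping in isolation (helper name invented):

/* Sketch: absorb a partial MSI-X grant by giving up the queues that
 * no longer have a vector of their own.
 */
static void example_absorb_msix_grant(struct bnx2x *bp, int requested,
				      int granted)
{
	if (granted < requested) {
		bp->num_ethernet_queues -= requested - granted;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
	}
}
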
@@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
/* Set shortcuts */
bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
+ if (IS_PF(bp))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
/* mark driver is loaded in shmem2 */
u32 val;
@@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
+ /* Clear driver version indication in shmem */
+ if (IS_PF(bp))
+ bnx2x_update_mng_version(bp);
+
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
@@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!IS_FCOE_IDX(index)) {
/* status blocks */
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
}
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
"allocating tx memory of fp %d cos %d\n",
index, cos);
- BNX2X_ALLOC(txdata->tx_buf_ring,
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
- &txdata->tx_desc_mapping,
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
}
}
/* Rx */
if (!skip_rx_queue(bp, index)) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
- &bnx2x_fp(bp, index, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
/* Seed all CQEs by 1s */
- BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
- &bnx2x_fp(bp, index, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
/* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
- &bnx2x_fp(bp, index, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
/* RX BD ring */
bnx2x_set_next_page_rx_bd(fp);
@@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
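
The verbose argument of bnx2x_schedule_sp_rtnl() is OR'd into the DP() mask, so a call site can make the "Scheduling sp_rtnl task" message visible under its own debug category in addition to BNX2X_MSG_SP; most converted call sites pass 0, while the Rx-mode path later in this series passes NETIF_MSG_IFUP. A hedged example of a converted caller (wrapper name invented):

/* Sketch: the open-coded set_bit()/barrier/schedule_delayed_work()
 * sequence collapses into one call; NETIF_MSG_IFUP makes the debug
 * print show up for people tracing interface bring-up.
 */
static void example_schedule_rx_mode(struct bnx2x *bp)
{
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, NETIF_MSG_IFUP);
}
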
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f88c25..05f4f5f52635 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -47,31 +47,26 @@ extern int bnx2x_num_queues;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0xFFFFFFFF, size); \
- DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
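
The allocation macros change shape rather than behaviour: the old do { ... goto alloc_mem_err; } while (0) form hid a jump inside the macro, while the new ({ ... }) statement expression simply yields the mapped pointer (or NULL) and leaves error handling to the caller. That is why every former BNX2X_PCI_ALLOC(x, y, size) use in this series becomes an assignment plus an explicit NULL check. A hedged sketch of a converted caller (wrapper name invented, fields taken from struct bnx2x):

static int example_alloc_spq(struct bnx2x *bp)
{
	/* macro evaluates to the virtual address, or NULL on failure */
	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
	if (!bp->spq)
		return -ENOMEM;
	return 0;
}
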
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
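
With BNX2X_ALLOC removed from this header, the plain array allocations in the .c files move to kcalloc(n, size, GFP_KERNEL), which zeroes the memory just like the old kzalloc()-based macro but also rejects requests whose n * size product would overflow. A minimal hedged sketch (wrapper name invented, driver headers assumed):

/* Sketch: BNX2X_ALLOC(ring, sizeof(struct sw_tx_bd) * NUM_TX_BD)
 * becomes an overflow-checked, zeroing array allocation; the caller
 * still checks for NULL and takes its own alloc_mem_err path.
 */
static struct sw_tx_bd *example_alloc_tx_buf_ring(void)
{
	return kcalloc(NUM_TX_BD, sizeof(struct sw_tx_bd), GFP_KERNEL);
}
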
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace204b054..97ea5421dd96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
* as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 38fc794c1655..b6de05e3149b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
- (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+ ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+ IS_VF(bp))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
BNX2X_NUM_Q_STATS;
} else
num_strings = 0;
- if (IS_MF_MODE_STAT(bp)) {
+ if (HIDE_PORT_STAT(bp)) {
for (i = 0; i < BNX2X_NUM_STATS; i++)
if (IS_FUNC_STAT(i))
num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
strcpy(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7a..95dc36543548 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b62e2c..5ba8af50c84f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
#define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
};
+/* Used to support NCSI get OS driver version:
+ * on driver load the version value will be set,
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED 0
+
+ /* personalities order is important */
+#define DRV_PERS_ETHERNET 0
+#define DRV_PERS_ISCSI 1
+#define DRV_PERS_FCOE 2
+
+ /* shmem2 struct is constant; can't add more personalities here */
+#define MAX_DRV_PERS 3
+ u32 versions[MAX_DRV_PERS];
+};
+
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2217,6 +2234,18 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
+
+ u32 reserved5[2];
+ u32 reserved6[PORT_MAX];
+
+ /* driver version for each personality */
+ struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+ /* Flag to the driver that PF's drv_info_host_addr buffer was read */
+ u32 mfw_drv_indication;
+
+ /* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
};
@@ -2848,7 +2877,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_REVISION_VERSION 19
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d4382286457..a78edaccceee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -120,7 +120,8 @@ static int debug;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
struct bnx2x_mac_vals {
u32 xmac_addr;
@@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u16 start = 0, end = 0;
u8 cos;
#endif
- if (disable_int)
+ if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+ /* VF cannot access FW reflection for status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#ifdef BNX2X_STOP_ON_ERROR
-
- /* event queue */
- BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
- for (i = 0; i < NUM_EQ_DESC; i++) {
- u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
- BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
- i, bp->eq_ring[i].message.opcode,
- bp->eq_ring[i].message.error);
- BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
}
/* Rings */
@@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+ bool release = false;
+ int wait;
/* if drv_info version supported by MFW doesn't match - send NACK */
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
DRV_INFO_CONTROL_OP_CODE_SHIFT;
+ /* Must prevent other flows from accessing drv_info_to_mcp */
+ mutex_lock(&bp->drv_info_mutex);
+
memset(&bp->slowpath->drv_info_to_mcp, 0,
sizeof(union drv_info_to_mcp));
@@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
default:
/* if op code isn't supported - send NACK */
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
- return;
+ goto out;
}
/* if we got drv_info attn from MFW then these fields are defined in
@@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+ /* Since management possibly wants both this and get_driver_version,
+ * we need to wait until management notifies us it has finished
+ * utilizing the buffer.
+ */
+ if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+ DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+ } else if (!bp->drv_info_mng_owner) {
+ u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
+
+ for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+ u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+ /* Management is done; need to clear indication */
+ if (indication & bit) {
+ SHMEM2_WR(bp, mfw_drv_indication,
+ indication & ~bit);
+ release = true;
+ break;
+ }
+
+ msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+ }
+ }
+ if (!release) {
+ DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+ bp->drv_info_mng_owner = true;
+ }
+
+out:
+ mutex_unlock(&bp->drv_info_mutex);
+}
+
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+ u8 vals[4];
+ int i = 0;
+
+ if (bnx2x_format) {
+ i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ if (i > 0)
+ vals[0] -= '0';
+ } else {
+ i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+ &vals[0], &vals[1], &vals[2], &vals[3]);
+ }
+
+ while (i < 4)
+ vals[i++] = 0;
+
+ return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
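
bnx2x_update_mng_version_utility() packs a dotted version string into a u32, one byte per component; for the bnx2x format the leading "1." is fixed, the next digit is read with %c (hence the '0' subtraction), and unmatched trailing components are zero-filled, so DRV_MODULE_VERSION "1.78.19-0" packs to 0x07081300. A hedged user-space rendition of the same packing, runnable on its own:

#include <stdio.h>

/* Sketch: mirrors the kernel helper's bnx2x-format branch.
 * "1.78.19-0" parses as 7, 8, 19 (the "-0" suffix stops the scan),
 * which packs to 0x07081300.
 */
int main(void)
{
	unsigned char vals[4] = { 0, 0, 0, 0 };
	char c;
	int n = sscanf("1.78.19-0", "1.%c%hhu.%hhu.%hhu",
		       &c, &vals[1], &vals[2], &vals[3]);

	if (n > 0)
		vals[0] = c - '0';

	printf("0x%08x\n",
	       (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]);
	return 0;
}
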
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+ u32 iscsiver = DRV_VER_NOT_LOADED;
+ u32 fcoever = DRV_VER_NOT_LOADED;
+ u32 ethver = DRV_VER_NOT_LOADED;
+ int idx = BP_FW_MB_IDX(bp);
+ u8 *version;
+
+ if (!SHMEM2_HAS(bp, func_os_drv_ver))
+ return;
+
+ mutex_lock(&bp->drv_info_mutex);
+ /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
+ if (bp->drv_info_mng_owner)
+ goto out;
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ goto out;
+
+ /* Parse ethernet driver version */
+ ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ if (!CNIC_LOADED(bp))
+ goto out;
+
+ /* Try getting storage driver version via cnic */
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_iscsi_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+ iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+ bnx2x_drv_info_fcoe_stat(bp);
+ version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+ fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+ SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+ mutex_unlock(&bp->drv_info_mutex);
+
+ DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+ ethver, iscsiver, fcoever);
}
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+ /* In some cases type may already contain the func-id
+ * (mainly in SRIOV-related use cases), so we only add it
+ * here if it's not already set.
+ */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
@@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_drv_info_req(bp);
if (val & DRV_STATUS_VF_DISABLED)
- bnx2x_vf_handle_flr_event(bp);
+ bnx2x_schedule_iov_task(bp,
+ BNX2X_IOV_HANDLE_FLR);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_VF_PF_CHANNEL:
- DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
- bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ bnx2x_vf_mbx_schedule(bp,
+ &elem->message.data.vf_pf_event);
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
- /* must be called after the EQ processing (since eq leads to sriov
- * ramrod completion flows).
- * This flow may have been scheduled by the arrival of a ramrod
- * completion, or by the sriov code rescheduling itself.
- */
- bnx2x_iov_sp_task(bp);
-
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
&bp->sp_state)) {
@@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
/* size = the status block + ramrod buffers */
- BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
- &bp->cnic_sb_mapping,
- sizeof(struct
- host_hc_status_block_e1x));
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
- if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table, as it wasn't allocated before */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
/* write address to which L5 should insert its values */
bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
- sizeof(struct bnx2x_slowpath));
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
+ goto alloc_mem_err;
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
for (i = 0, allocated = 0; allocated < context_size; i++) {
bp->context[i].size = min(CDU_ILT_PAGE_SZ,
(context_size - allocated));
- BNX2X_PCI_ALLOC(bp->context[i].vcxt,
- &bp->context[i].cxt_mapping,
- bp->context[i].size);
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
allocated += bp->context[i].size;
}
- BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
@@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
goto alloc_mem_err;
/* Slow path ring */
- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
/* EQ */
- BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
- BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
+ goto alloc_mem_err;
return 0;
@@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
synchronize_irq(bp->pdev->irq);
flush_workqueue(bnx2x_wq);
+ flush_workqueue(bnx2x_iov_wq);
while (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED && tout--)
@@ -9774,6 +9909,10 @@ sp_rtnl_not_reset:
bnx2x_dcbx_resume_hw_tx(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+ &bp->sp_rtnl_state))
+ bnx2x_update_mng_version(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
+ mutex_init(&bp->drv_info_mutex);
+ bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+ INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
if (IS_PF(bp)) {
rc = bnx2x_get_hwinfo(bp);
if (rc)
@@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= reset_devices;
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
{
int mc_count = netdev_mc_count(bp->dev);
struct bnx2x_mcast_list_elem *mc_mac =
- kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
struct netdev_hw_addr *ha;
if (!mc_mac)
@@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
return;
} else {
/* Schedule an SP task to handle rest of change */
- DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
}
@@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
/* configuring mcast to a vf involves sleeping (when we
* wait for the pf's response).
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
}
}
@@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void)
pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
+ bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+ if (!bnx2x_iov_wq) {
+ pr_err("Cannot create iov workqueue\n");
+ destroy_workqueue(bnx2x_wq);
+ return -ENOMEM;
+ }
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
}
return ret;
}
@@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
pci_unregister_driver(&bnx2x_pci_driver);
destroy_workqueue(bnx2x_wq);
+ destroy_workqueue(bnx2x_iov_wq);
/* Free globally allocated resources */
list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
REG_WR(bp, scratch_offset + i,
*(host_addr + i/4));
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
}
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
break;
}
@@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
rcu_assign_pointer(bp->cnic_ops, ops);
+ /* Schedule driver to read CNIC driver versions */
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..31297266b743 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac)
-{
- if (!vlan_mac->get_n_elements) {
- BNX2X_ERR("vlan mac object was not intialized\n");
- return -EINVAL;
- }
- return 0;
-}
-
/********************** Queue state object ***********************************/
/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+ /* Add the function id inside the type, so that sp post function
+ * doesn't automatically add the PF func-id; this is required
+ * for operations done by PFs on behalf of their VFs
+ */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..80f6c790ed88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
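
Taken together, the bnx2x_queue_update_tpa_params added here, the BNX2X_Q_CMD_UPDATE_TPA completion handling in bnx2x_sp_event(), and bnx2x_q_send_update_tpa() in bnx2x_sp.c let a queue's TPA aggregation be reconfigured through the ordinary queue state machine, including by a PF on behalf of a VF (the func-id is embedded in the SPE type for that case). A hedged sketch of what a caller could look like; the helper name and field values are illustrative, and the real caller lives in the SR-IOV code:

/* Sketch: request a TPA update for one queue and wait for the
 * ramrod to complete.  Values are placeholders, not the driver's
 * actual configuration.
 */
static int example_update_queue_tpa(struct bnx2x *bp,
				    struct bnx2x_queue_sp_obj *q_obj,
				    dma_addr_t sge_map, u8 enable)
{
	struct bnx2x_queue_state_params qstate;
	struct bnx2x_queue_update_tpa_params *tpa =
		&qstate.params.update_tpa;

	memset(&qstate, 0, sizeof(qstate));
	qstate.q_obj = q_obj;
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	tpa->sge_map = sge_map;
	tpa->update_ipv4 = enable;
	tpa->update_ipv6 = enable;

	return bnx2x_queue_state_change(bp, &qstate);
}
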
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48df6e94..5c523b32db70 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
mmiowb();
barrier();
}
-/* VFOP - VF slow-path operation support */
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
/* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
- BNX2X_VFOP_QCTOR_INIT,
- BNX2X_VFOP_QCTOR_SETUP,
- BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
- BNX2X_VFOP_QDTOR_HALT,
- BNX2X_VFOP_QDTOR_TERMINATE,
- BNX2X_VFOP_QDTOR_CFCDEL,
- BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
- BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- BNX2X_VFOP_VLAN_MAC_CLEAR,
- BNX2X_VFOP_VLAN_MAC_CHK_DONE,
- BNX2X_VFOP_MAC_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST,
- BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
- BNX2X_VFOP_QSETUP_CTOR,
- BNX2X_VFOP_QSETUP_VLAN0,
- BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
- BNX2X_VFOP_MCAST_DEL,
- BNX2X_VFOP_MCAST_ADD,
- BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
- BNX2X_VFOP_QFLR_CLR_VLAN,
- BNX2X_VFOP_QFLR_CLR_MAC,
- BNX2X_VFOP_QFLR_TERMINATE,
- BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
- BNX2X_VFOP_FLR_QUEUES,
- BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
- BNX2X_VFOP_CLOSE_QUEUES,
- BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
- BNX2X_VFOP_RXMODE_CONFIG,
- BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
- BNX2X_VFOP_QTEARDOWN_RXMODE,
- BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
- BNX2X_VFOP_QTEARDOWN_CLR_MAC,
- BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
- BNX2X_VFOP_QTEARDOWN_QDTOR,
- BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
- BNX2X_VFOP_RSS_CONFIG,
- BNX2X_VFOP_RSS_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
-
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
struct bnx2x_queue_setup_params *setup_params,
@@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type)
{
struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
}
}
-/* VFOP queue construction */
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qctor_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_QCTOR_INIT:
-
- /* has this queue already been opened? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- DP(BNX2X_MSG_IOV,
- "Entered qctor but queue was already up. Aborting gracefully\n");
- goto op_done;
- }
-
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_SETUP;
-
- q_params->cmd = BNX2X_Q_CMD_INIT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QCTOR_SETUP:
- /* next state */
- vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
-
- /* copy pre-prepared setup params to the queue-state params */
- vfop->op_p->qctor.qstate.params.setup =
- vfop->op_p->qctor.prep_qsetup;
-
- q_params->cmd = BNX2X_Q_CMD_SETUP;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ struct bnx2x_queue_state_params *q_params;
+ int rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QCTOR_INT_EN:
+ /* Prepare ramrod information */
+ q_params = &qctor->qstate;
+ q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
- /* enable interrupts */
- bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
- USTORM_ID, 0, IGU_INT_ENABLE, 0);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+ goto out;
}
-op_err:
- BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ /* Run Queue 'construction' ramrods */
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- vfop->args.qctor.qid = qid;
- vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+ memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+ sizeof(struct bnx2x_queue_setup_params));
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ rc = bnx2x_queue_state_change(bp, q_params);
+ if (rc)
+ goto out;
- bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
- bnx2x_vfop_qctor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
- cmd->block);
- }
- return -ENOMEM;
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+ return rc;
}
-/* VFOP queue destruction */
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
- struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
- enum bnx2x_vfop_qdtor_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_QDTOR_HALT:
-
- /* has this queue already been stopped? */
- if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
- BNX2X_Q_LOGICAL_STATE_STOPPED) {
- DP(BNX2X_MSG_IOV,
- "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
-
- q_params->cmd = BNX2X_Q_CMD_HALT;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_QDTOR_TERMINATE:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
-
- q_params->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_CFC_DEL};
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- case BNX2X_VFOP_QDTOR_CFCDEL:
- /* next state */
- vfop->state = BNX2X_VFOP_QDTOR_DONE;
+ /* Prepare ramrod information */
+ memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+ q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
- vfop->rc = bnx2x_queue_state_change(bp, q_params);
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+ goto out;
+ }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
- vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
-op_done:
- case BNX2X_VFOP_QDTOR_DONE:
- /* invalidate the context */
- if (qdtor->cxt) {
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ /* Run Queue 'destruction' ramrods */
+ for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+ q_params.cmd = cmds[i];
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+ return rc;
}
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
}
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_queue_state_params *qstate =
- &vf->op_params.qctor.qstate;
-
- memset(qstate, 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-
- vfop->args.qdtor.qid = qid;
- vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
-
- bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
- bnx2x_vfop_qdtor, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
- cmd->block);
- } else {
- BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
- return -ENOMEM;
+out:
+ /* Clean Context */
+ if (bnx2x_vfq(vf, qid, cxt)) {
+ bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+ bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
}
+
+ return 0;
}
static void
@@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
BP_VFDB(bp)->vf_sbs_pool++;
}
-/* VFOP MAC/VLAN helpers */
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
- struct bnx2x_vfop *vfop,
- struct bnx2x_vlan_mac_obj *obj)
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *obj,
+ atomic_t *counter)
{
- struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
-
- /* update credit only if there is no error
- * and a valid credit counter
- */
- if (!vfop->rc && args->credit) {
- struct list_head *pos;
- int read_lock;
- int cnt = 0;
-
- read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
- if (read_lock)
- DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+ struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
- list_for_each(pos, &obj->head)
- cnt++;
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
- if (!read_lock)
- bnx2x_vlan_mac_h_read_unlock(bp, obj);
+ list_for_each(pos, &obj->head)
+ cnt++;
- atomic_set(args->credit, cnt);
- }
-}
-
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
- struct bnx2x_vfop_filter *pos,
- struct bnx2x_vlan_mac_data *user_req)
-{
- user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
- BNX2X_VLAN_MAC_DEL;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
- switch (pos->type) {
- case BNX2X_VFOP_FILTER_MAC:
- memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
- break;
- case BNX2X_VFOP_FILTER_VLAN:
- user_req->u.vlan.vlan = pos->vid;
- break;
- default:
- BNX2X_ERR("Invalid filter type, skipping\n");
- return 1;
- }
- return 0;
+ atomic_set(counter, cnt);
}
-static int bnx2x_vfop_config_list(struct bnx2x *bp,
- struct bnx2x_vfop_filters *filters,
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, bool drv_only, bool mac)
{
- struct bnx2x_vfop_filter *pos, *tmp;
- struct list_head rollback_list, *filters_list = &filters->head;
- struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
- int rc = 0, cnt = 0;
-
- INIT_LIST_HEAD(&rollback_list);
-
- list_for_each_entry_safe(pos, tmp, filters_list, link) {
- if (bnx2x_vfop_set_user_req(bp, pos, user_req))
- continue;
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
+ int rc;
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (rc >= 0) {
- cnt += pos->add ? 1 : -1;
- list_move(&pos->link, &rollback_list);
- rc = 0;
- } else if (rc == -EEXIST) {
- rc = 0;
- } else {
- BNX2X_ERR("Failed to add a new vlan_mac command\n");
- break;
- }
- }
+ DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+ mac ? "MACs" : "VLANs");
- /* rollback if error or too many rules added */
- if (rc || cnt > filters->add_cnt) {
- BNX2X_ERR("error or too many rules added. Performing rollback\n");
- list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
- pos->add = !pos->add; /* reverse op */
- bnx2x_vfop_set_user_req(bp, pos, user_req);
- bnx2x_config_vlan_mac(bp, vlan_mac);
- list_del(&pos->link);
- }
- cnt = 0;
- if (!rc)
- rc = -EINVAL;
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (mac) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ } else {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
- filters->add_cnt = cnt;
- return rc;
-}
-
-/* VFOP set VLAN/MAC */
-static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
- struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
- struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
-
- enum bnx2x_vfop_vlan_mac_state state = vfop->state;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- bnx2x_vfop_reset_wq(vf);
-
- switch (state) {
- case BNX2X_VFOP_VLAN_MAC_CLEAR:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do delete */
- vfop->rc = obj->delete_all(bp, obj,
- &vlan_mac->user_req.vlan_mac_flags,
- &vlan_mac->ramrod_flags);
-
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do config */
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (vfop->rc == -EEXIST)
- vfop->rc = 0;
+ ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
- vfop->rc = !!obj->raw.check_pending(&obj->raw);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_MAC_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do list config */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (vfop->rc)
- goto op_err;
-
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_VLAN_CONFIG_LIST:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- /* do list config */
- vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
- if (!vfop->rc) {
- set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
- vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- }
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
- default:
- bnx2x_vfop_default(state);
+ /* Start deleting */
+ rc = ramrod.vlan_mac_obj->delete_all(bp,
+ ramrod.vlan_mac_obj,
+ &ramrod.user_req.vlan_mac_flags,
+ &ramrod.ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("Failed to delete all %s\n",
+ mac ? "MACs" : "VLANs");
+ return rc;
}
-op_err:
- BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
-op_done:
- kfree(filters);
- bnx2x_vfop_credit(bp, vfop, obj);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-
-struct bnx2x_vfop_vlan_mac_flags {
- bool drv_only;
- bool dont_consume;
- bool single_cmd;
- bool add;
-};
-
-static void
-bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
-{
- struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
-
- memset(ramrod, 0, sizeof(*ramrod));
- /* ramrod flags */
- if (flags->drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
- if (flags->single_cmd)
- set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+ /* Clear the vlan counters */
+ if (!mac)
+ atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
- /* mac_vlan flags */
- if (flags->dont_consume)
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
-
- /* cmd */
- ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
-}
-
-static inline void
-bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
- struct bnx2x_vfop_vlan_mac_flags *flags)
-{
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
- set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+ return 0;
}
-static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_mac_vlan_filter *filter,
+ bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single */
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false /* don't care */,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
-
- /* set extra args */
- vfop->args.filters = filters;
-
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+ vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+
+ /* Prepare ramrod params */
+ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+ if (filter->type == BNX2X_VF_FILTER_VLAN) {
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ } else {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ }
+ ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ /* Verify there are available vlan credits */
+ if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
+ (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
+ vf_vlan_rules_cnt(vf))) {
+ BNX2X_ERR("No credits for vlan\n");
+ return -ENOMEM;
}
- return -ENOMEM;
-}
-
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = macs,
- .credit = NULL, /* consume credit */
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care since only the items in the
- * filters list affect the sp operation,
- * not the list itself
- */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+ set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+ /* Add/Remove the filter */
+ rc = bnx2x_config_vlan_mac(bp, &ramrod);
+ if (rc && rc != -EEXIST) {
+ BNX2X_ERR("Failed to %s %s\n",
+ filter->add ? "add" : "delete",
+ filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
+ "VLAN");
+ return rc;
+ }
- /* set extra args */
- filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
- vfop->args.filters = filters;
+ /* Update the vlan counters */
+ if (filter->type == BNX2X_VF_FILTER_VLAN)
+ bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
+ &bnx2x_vfq(vf, qid, vlan_count));
- bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
}
-static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, u16 vid, bool add)
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ int rc = 0, i;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = false,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = add,
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
- ramrod->user_req.u.vlan.vlan = vid;
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* set extra args */
- vfop->args.filters = filters;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
+ /* Prepare ramrod params */
+ for (i = 0; i < filters->count; i++) {
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i], drv_only);
+ if (rc)
+ break;
}
- return -ENOMEM;
-}
-
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, bool drv_only)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = NULL, /* single command */
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = true,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+ /* Rollback if needed */
+ if (i != filters->count) {
+ BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+ i, filters->count + 1);
+ while (--i >= 0) {
+ filters->filters[i].add = !filters->filters[i].add;
+ bnx2x_vf_mac_vlan_config(bp, vf, qid,
+ &filters->filters[i],
+ drv_only);
+ }
+ }
- /* set extra args */
- vfop->args.filters = filters;
+ /* It's our responsibility to free the filters */
+ kfree(filters);
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
+ return rc;
}
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only)
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
int rc;
- if (vfop) {
- struct bnx2x_vfop_args_filters filters = {
- .multi_filter = vlans,
- .credit = &bnx2x_vfq(vf, qid, vlan_count),
- };
- struct bnx2x_vfop_vlan_mac_flags flags = {
- .drv_only = drv_only,
- .dont_consume = (filters.credit != NULL),
- .single_cmd = false,
- .add = false, /* don't care */
- };
- struct bnx2x_vlan_mac_ramrod_params *ramrod =
- &vf->op_params.vlan_mac;
-
- /* set ramrod params */
- bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
- /* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
- ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
- /* set extra args */
- filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
- atomic_read(filters.credit);
-
- vfop->args.filters = filters;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
- bnx2x_vfop_vlan_mac, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
- cmd->block);
- }
- return -ENOMEM;
-}
-
-/* VFOP queue setup (queue constructor + set vlan 0) */
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qctor.qid;
- enum bnx2x_vfop_qsetup_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_qsetup,
- .block = false,
- };
-
- if (vfop->rc < 0)
+ rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+ if (rc)
goto op_err;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Configure vlan0 for leading queue */
+ if (!qid) {
+ struct bnx2x_vf_mac_vlan_filter filter;
- switch (state) {
- case BNX2X_VFOP_QSETUP_CTOR:
- /* init the queue ctor command */
- vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
- vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
- if (vfop->rc)
+ memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
+ filter.type = BNX2X_VF_FILTER_VLAN;
+ filter.add = true;
+ filter.vid = 0;
+ rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
+ if (rc)
goto op_err;
- return;
-
- case BNX2X_VFOP_QSETUP_VLAN0:
- /* skip if non-leading or FPGA/EMU*/
- if (qid)
- goto op_done;
+ }
- /* init the queue set-vlan command (for vlan 0) */
- vfop->state = BNX2X_VFOP_QSETUP_DONE;
- vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
- if (vfop->rc)
- goto op_err;
- return;
+ /* Schedule the configuration of any pending vlan filters */
+ vf->cfg_flags |= VF_CFG_VLAN;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
+ return 0;
op_err:
- BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QSETUP_DONE:
- vf->cfg_flags |= VF_CFG_VLAN;
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
- if (vfop) {
- vfop->args.qctor.qid = qid;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
- bnx2x_vfop_qsetup, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
- cmd->block);
+ /* If needed, clean the filtering data base */
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
-}
-
-/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
-static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qflr_state state = vfop->state;
- struct bnx2x_queue_state_params *qstate;
- struct bnx2x_vfop_cmd cmd;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
- DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+ /* Terminate queue */
+ if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+ struct bnx2x_queue_state_params qstate;
- cmd.done = bnx2x_vfop_qflr;
- cmd.block = false;
-
- switch (state) {
- case BNX2X_VFOP_QFLR_CLR_VLAN:
- /* vlan-clear-all: driver-only, don't consume credit */
- vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- case BNX2X_VFOP_QFLR_CLR_MAC:
- /* mac-clear-all: driver only consume credit */
- vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
-
- case BNX2X_VFOP_QFLR_TERMINATE:
- qstate = &vfop->op_p->qctor.qstate;
- memset(qstate , 0, sizeof(*qstate));
- qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
- vfop->state = BNX2X_VFOP_QFLR_DONE;
-
- DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
- vf->abs_vfid, qstate->q_obj->state);
-
- if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
- qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
- qstate->cmd = BNX2X_Q_CMD_TERMINATE;
- vfop->rc = bnx2x_queue_state_change(bp, qstate);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
- } else {
- goto op_done;
- }
-
-op_err:
- BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
-op_done:
- case BNX2X_VFOP_QFLR_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc)
+ goto op_err;
}
-op_pending:
- return;
-}
-
-static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
- bnx2x_vfop_qflr, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+ return rc;
}
-/* VFOP multi-casts */
-static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
- struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
- struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
- enum bnx2x_vfop_mcast_state state = vfop->state;
- int i;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
+ struct bnx2x_mcast_list_elem *mc = NULL;
+ struct bnx2x_mcast_ramrod_params mcast;
+ int rc, i;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_MCAST_DEL:
- /* clear existing mcasts */
- vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
- : BNX2X_VFOP_MCAST_CHK_DONE;
- mcast->mcast_list_len = vf->mcast_list_len;
- vf->mcast_list_len = args->mc_num;
- vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
- case BNX2X_VFOP_MCAST_ADD:
- if (raw->check_pending(raw))
- goto op_pending;
-
- /* update mcast list on the ramrod params */
- INIT_LIST_HEAD(&mcast->mcast_list);
- for (i = 0; i < args->mc_num; i++)
- list_add_tail(&(args->mc[i].link),
- &mcast->mcast_list);
- mcast->mcast_list_len = args->mc_num;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* add new mcasts */
- vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
- vfop->rc = bnx2x_config_mcast(bp, mcast,
- BNX2X_MCAST_CMD_ADD);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
- case BNX2X_VFOP_MCAST_CHK_DONE:
- vfop->rc = raw->check_pending(raw) ? 1 : 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
- default:
- bnx2x_vfop_default(state);
+ /* Prepare Multicast command */
+ memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+ mcast.mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+ else
+ set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ if (mc_num) {
+ mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+ GFP_KERNEL);
+ if (!mc) {
+			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
+ return -ENOMEM;
+ }
}
-op_err:
- BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
- kfree(args->mc);
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- return;
-}
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only)
-{
- struct bnx2x_vfop *vfop = NULL;
- size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
- struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
- NULL;
-
- if (!mc_sz || mc) {
- vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- int i;
- struct bnx2x_mcast_ramrod_params *ramrod =
- &vf->op_params.mcast;
-
- /* set ramrod params */
- memset(ramrod, 0, sizeof(*ramrod));
- ramrod->mcast_obj = &vf->mcast_obj;
- if (drv_only)
- set_bit(RAMROD_DRV_CLR_ONLY,
- &ramrod->ramrod_flags);
-
- /* copy mcasts pointers */
- vfop->args.mc_list.mc_num = mcast_num;
- vfop->args.mc_list.mc = mc;
- for (i = 0; i < mcast_num; i++)
- mc[i].mac = mcasts[i];
-
- bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
- bnx2x_vfop_mcast, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
- cmd->block);
- } else {
+ /* clear existing mcasts */
+ mcast.mcast_list_len = vf->mcast_list_len;
+ vf->mcast_list_len = mc_num;
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+ if (rc) {
+ BNX2X_ERR("Failed to remove multicasts\n");
+ if (mc)
kfree(mc);
- }
+ return rc;
}
- return -ENOMEM;
-}
-
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
- enum bnx2x_vfop_rxmode_state state = vfop->state;
-
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_RXMODE_CONFIG:
- /* next state */
- vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* update mcast list on the ramrod params */
+ if (mc_num) {
+ INIT_LIST_HEAD(&mcast.mcast_list);
+ for (i = 0; i < mc_num; i++) {
+ mc[i].mac = mcasts[i];
+ list_add_tail(&mc[i].link,
+ &mcast.mcast_list);
+ }
- /* record the accept flags in vfdb so hypervisor can modify them
- * if necessary
- */
- bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
- ramrod->rx_accept_flags;
- vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
- case BNX2X_VFOP_RXMODE_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
+ /* add new mcasts */
+ rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ if (rc)
+			BNX2X_ERR("Failed to add multicasts\n");
+ kfree(mc);
}
-op_pending:
- return;
+
+ return rc;
}
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- if (vfop) {
- struct bnx2x_rx_mode_ramrod_params *ramrod =
- &vf->op_params.rx_mode;
+ struct bnx2x_rx_mode_ramrod_params ramrod;
- bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
- bnx2x_vfop_rxmode, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
- cmd->block);
- }
- return -ENOMEM;
+ bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+ set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+ return bnx2x_config_rx_mode(bp, &ramrod);
}
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- * queue destructor)
- */
-static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- int qid = vfop->args.qx.qid;
- enum bnx2x_vfop_qteardown_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd;
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- cmd.done = bnx2x_vfop_qdown;
- cmd.block = false;
-
- switch (state) {
- case BNX2X_VFOP_QTEARDOWN_RXMODE:
- /* Drop all */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
- vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
- if (vfop->rc)
- goto op_err;
- return;
-
- case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
- /* vlan-clear-all: don't consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
-
- case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
- /* mac-clear-all: consume credit */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
- if (vfop->rc)
- goto op_err;
- return;
+ int rc;
- case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
- vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
- if (vfop->rc)
- goto op_err;
- return;
+ DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
- case BNX2X_VFOP_QTEARDOWN_QDTOR:
- /* run the queue destruction flow */
- DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
- vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
- DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
- vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
- DP(BNX2X_MSG_IOV, "returned from cmd\n");
- if (vfop->rc)
+ /* Remove all classification configuration for leading queue */
+ if (qid == LEADING_IDX) {
+ rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+ if (rc)
goto op_err;
- return;
-op_err:
- BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
- vf->abs_vfid, qid, vfop->rc);
-
- case BNX2X_VFOP_QTEARDOWN_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
-}
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
- /* for non leading queues skip directly to qdown sate */
- if (vfop) {
- vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(qid == LEADING_IDX ?
- BNX2X_VFOP_QTEARDOWN_RXMODE :
- BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
- cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
- cmd->block);
+ /* Remove filtering if feasible */
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, false);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false, true);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+ if (rc)
+ goto op_err;
+ }
}
- return -ENOMEM;
+ /* Destroy queue */
+ rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+ if (rc)
+ goto op_err;
+ return rc;
+op_err:
+ BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, rc);
+ return rc;
}
/* VF enable primitives
@@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
bnx2x_tx_hw_flushed(bp, poll_cnt);
}
-static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_flr_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_flr,
- .block = false,
- };
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+ int rc, i;
- switch (state) {
- case BNX2X_VFOP_FLR_QUEUES:
- /* the cleanup operations are valid if and only if the VF
- * was first acquired.
- */
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
- qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
- /* remove multicasts */
- vfop->state = BNX2X_VFOP_FLR_HW;
- vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
- 0, true);
- if (vfop->rc)
- goto op_err;
- return;
- case BNX2X_VFOP_FLR_HW:
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- /* dispatch final cleanup and wait for HW queues to flush */
- bnx2x_vf_flr_clnup_hw(bp, vf);
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_flr(bp, vf, i);
+ if (rc)
+ goto out;
+ }
- /* release VF resources */
- bnx2x_vf_free_resc(bp, vf);
+ /* remove multicasts */
+ bnx2x_vf_mcast(bp, vf, NULL, 0, true);
- /* re-open the mailbox */
- bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- vf->flr_clnup_stage = VF_FLR_ACK;
- bnx2x_vfop_end(bp, vf, vfop);
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
-}
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
-static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t done)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
- bnx2x_vfop_flr, done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
- }
- return -ENOMEM;
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ return;
+out:
+ BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+ vf->abs_vfid, i, rc);
}
-static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
- int i = prev_vf ? prev_vf->index + 1 : 0;
struct bnx2x_virtf *vf;
+ int i;
- /* find next VF to cleanup */
-next_vf_to_clean:
- for (;
- i < BNX2X_NR_VIRTFN(bp) &&
- (bnx2x_vf(bp, i, state) != VF_RESET ||
- bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
- i++)
- ;
+ for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+ /* VF should be RESET & in FLR cleanup states */
+ if (bnx2x_vf(bp, i, state) != VF_RESET ||
+ !bnx2x_vf(bp, i, flr_clnup_stage))
+ continue;
- DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
- BNX2X_NR_VIRTFN(bp));
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+ i, BNX2X_NR_VIRTFN(bp));
- if (i < BNX2X_NR_VIRTFN(bp)) {
vf = BP_VF(bp, i);
/* lock the vf pf channel */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
/* invoke the VF FLR SM */
- if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
- BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
- vf->abs_vfid);
+ bnx2x_vf_flr(bp, vf);
- /* mark the VF to be ACKED and continue */
- vf->flr_clnup_stage = VF_FLR_ACK;
- goto next_vf_to_clean;
- }
- return;
- }
-
- /* we are done, update vf records */
- for_each_vf(bp, i) {
- vf = BP_VF(bp, i);
-
- if (vf->flr_clnup_stage != VF_FLR_ACK)
- continue;
-
- vf->flr_clnup_stage = VF_FLR_EPILOG;
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = false;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}
/* Acknowledge the handled VFs.
@@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
if (reset) {
/* set as reset and ready for cleanup */
vf->state = VF_RESET;
- vf->flr_clnup_stage = VF_FLR_CLN;
+ vf->flr_clnup_stage = true;
DP(BNX2X_MSG_IOV,
"Initiating Final cleanup for VF %d\n",
@@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
}
/* do the FLR cleanup for all marked VFs*/
- bnx2x_vf_flr_clnup(bp, NULL);
+ bnx2x_vf_flr_clnup(bp);
}
/* IOV global initialization routines */
@@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
bnx2x_vf(bp, i, index) = i;
bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
bnx2x_vf(bp, i, state) = VF_FREE;
- INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
mutex_init(&bnx2x_vf(bp, i, op_mutex));
bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
}
@@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
}
+ /* Prepare the VFs event synchronization mechanism */
+ mutex_init(&bp->vfdb->event_mutex);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
if (cxt->size) {
- BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
} else {
cxt->addr = NULL;
cxt->mapping = 0;
@@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
/* allocate vfs ramrods dma memory - client_init and set_mac */
tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
- BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
- tot_size);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
BP_VFDB(bp)->sp_dma.size = tot_size;
/* allocate mailboxes */
tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
- tot_size);
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_MBX_DMA(bp)->size = tot_size;
/* allocate local bulletin boards */
tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
- &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_BULLETIN_DMA(bp)->size = tot_size;
return 0;
@@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_sp_map(bp, vf, q_data),
q_type);
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
DP(BNX2X_MSG_IOV,
"initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
/* release all the VFs */
for_each_vf(bp, i)
- bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+ bnx2x_vf_release(bp, BP_VF(bp, i));
return 0;
}
@@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
smp_mb__after_clear_bit();
}
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
struct bnx2x_virtf *vf;
@@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
case EVENT_RING_OPCODE_MULTICAST_RULES:
case EVENT_RING_OPCODE_FILTERS_RULES:
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
cid = (elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK);
DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2447,13 +1726,15 @@ get_vf:
vf->abs_vfid, qidx);
bnx2x_vf_handle_filters_eqe(bp, vf);
break;
+ case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_rss_update_eqe(bp, vf);
case EVENT_RING_OPCODE_VF_FLR:
case EVENT_RING_OPCODE_MALICIOUS_VF:
/* Do nothing for now */
return 0;
}
- /* SRIOV: reschedule any 'in_progress' operations */
- bnx2x_iov_sp_event(bp, cid, false);
return 0;
}
@@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
}
}
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
-{
- struct bnx2x_virtf *vf;
-
- /* check if the cid is the VF range */
- if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
- return;
-
- vf = bnx2x_vf_by_cid(bp, vf_cid);
- if (vf) {
- /* set in_progress flag */
- atomic_set(&vf->op_in_progress, 1);
- if (queue_work)
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
- }
-}
-
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
int i;
@@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
(is_fcoe ? 0 : 1);
- DP(BNX2X_MSG_IOV,
- "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
- BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
- first_queue_query_index + num_queues_req);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (vf->state != VF_ENABLED) {
- DP(BNX2X_MSG_IOV,
- "vf %d not enabled so no stats for it\n",
- vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
continue;
}
@@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
-void bnx2x_iov_sp_task(struct bnx2x *bp)
-{
- int i;
-
- if (!IS_SRIOV(bp))
- return;
- /* Iterate over all VFs and invoke state transition for VFs with
- * 'in-progress' slow-path operations
- */
- DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
-
- if (!vf) {
- BNX2X_ERR("VF was null! skipping...\n");
- continue;
- }
-
- if (!list_empty(&vf->op_list_head) &&
- atomic_read(&vf->op_in_progress)) {
- DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
- bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
- }
- }
-}
-
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
@@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie)
p->vf->state = p->state;
}
-/* VFOP close (teardown the queues, delete mcasts and close HW) */
-static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
- enum bnx2x_vfop_close_state state = vfop->state;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_close,
- .block = false,
- };
+ int rc = 0, i;
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_CLOSE_QUEUES:
-
- if (++(qx->qid) < vf_rxq_count(vf)) {
- vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
- if (vfop->rc)
- goto op_err;
- return;
- }
- vfop->state = BNX2X_VFOP_CLOSE_HW;
- vfop->rc = 0;
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
- case BNX2X_VFOP_CLOSE_HW:
-
- /* disable the interrupts */
- DP(BNX2X_MSG_IOV, "disabling igu\n");
- bnx2x_vf_igu_disable(bp, vf);
+ /* Close all queues */
+ for (i = 0; i < vf_rxq_count(vf); i++) {
+ rc = bnx2x_vf_queue_teardown(bp, vf, i);
+ if (rc)
+ goto op_err;
+ }
- /* disable the VF */
- DP(BNX2X_MSG_IOV, "clearing qtbl\n");
- bnx2x_vf_clr_qtbl(bp, vf);
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
- }
-op_err:
- BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
/* need to make sure there are no outstanding stats ramrods which may
* cause the device to access the VF's stats buffer which it will free
@@ -2909,43 +2121,20 @@ op_done:
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");
- bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
- /* Not supported at the moment; Exists for macros only */
- return;
-}
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- vfop->args.qx.qid = -1; /* loop */
- bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
- bnx2x_vfop_close, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
- cmd->block);
- }
- return -ENOMEM;
+ return 0;
+op_err:
+ BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
/* VF release can be called either: 1. The VF was acquired but
* not enabled 2. the vf was enabled or in the process of being
* enabled
*/
-static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_release,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-
- if (vfop->rc < 0)
- goto op_err;
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
vf->state == VF_FREE ? "Free" :
@@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (vf->state) {
case VF_ENABLED:
- vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vfop->rc)
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
goto op_err;
- return;
-
+ /* Fallthrough to release resources */
case VF_ACQUIRED:
DP(BNX2X_MSG_IOV, "about to free resources\n");
bnx2x_vf_free_resc(bp, vf);
- DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
- goto op_done;
+ break;
case VF_FREE:
case VF_RESET:
- /* do nothing */
- goto op_done;
default:
- bnx2x_vfop_default(vf->state);
+ break;
}
+ return 0;
op_err:
- BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
- bnx2x_vfop_end(bp, vf, vfop);
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+ return rc;
}
-static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- enum bnx2x_vfop_rss_state state;
-
- if (!vfop) {
- BNX2X_ERR("vfop was null\n");
- return;
- }
-
- state = vfop->state;
- bnx2x_vfop_reset_wq(vf);
-
- if (vfop->rc < 0)
- goto op_err;
-
- DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
- switch (state) {
- case BNX2X_VFOP_RSS_CONFIG:
- /* next state */
- vfop->state = BNX2X_VFOP_RSS_DONE;
- bnx2x_config_rss(bp, &vfop->op_p->rss);
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
- BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
-op_done:
- case BNX2X_VFOP_RSS_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
- default:
- bnx2x_vfop_default(state);
- }
-op_pending:
- return;
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+ set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+ return bnx2x_config_rss(bp, rss);
}
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(-1, /* use vf->state */
- bnx2x_vfop_release, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
- cmd->block);
- }
- return -ENOMEM;
-}
+ aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+ struct bnx2x_queue_state_params qstate;
+ int qid, rc = 0;
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+ /* Set ramrod params */
+ memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+ memcpy(&qstate.params.update_tpa, params,
+ sizeof(struct bnx2x_queue_update_tpa_params));
+ qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
- if (vfop) {
- bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
- cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
- cmd->block);
+ for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+ qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ qstate.params.update_tpa.sge_map = sge_addr[qid];
+ DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+ vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+ U64_LO(sge_addr[qid]));
+ rc = bnx2x_queue_state_change(bp, &qstate);
+ if (rc) {
+ BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+ U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+ vf->abs_vfid, qid);
+ return rc;
+ }
}
- return -ENOMEM;
+
+ return rc;
}
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = NULL,
- .block = block,
- };
int rc;
DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ rc = bnx2x_vf_free(bp, vf);
if (rc)
WARN(rc,
"VF[%d] Failed to allocate resources for release op- rc=%d\n",
vf->abs_vfid, rc);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ return rc;
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
*sbdf = vf->devfn | (vf->bus << 8);
}
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vf_bar_info *bar_info)
-{
- int n;
-
- bar_info->nr_bars = bp->vfdb->sriov.nres;
- for (n = 0; n < bar_info->nr_bars; n++)
- bar_info->bars[n] = vf->bars[n];
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
@@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
0, ETH_ALEN);
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
vlan_obj->get_n_elements(bp, vlan_obj, 1,
(u8 *)&ivi->vlan, 0,
VLAN_HLEN);
+ }
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj =
- &bnx2x_leading_vfq(vf, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
BNX2X_Q_LOGICAL_STATE_ACTIVE)
return rc;
- /* configure the vlan in device on this vf's queue */
- vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
/* remove existing vlans */
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc) {
@@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
bnx2x_sample_bulletin(bp);
/* if channel is down we need to self destruct */
- if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
mutex_init(&bp->vf2pf_mutex);
/* allocate vf2pf mailbox for vf to pf channel */
- BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
/* allocate pf 2 vf bulletin board */
- BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
return 0;
@@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
bnx2x_post_vf_bulletin(bp, vf_idx);
}
}
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+ &bp->iov_task_state))
+ bnx2x_vf_handle_flr_event(bp);
+
+ if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+ &bp->iov_task_state))
+ bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->iov_task_state);
+ smp_mb__after_clear_bit();
+ DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..8bf764570eef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
#ifdef CONFIG_BNX2X_SRIOV
+extern struct workqueue_struct *bnx2x_iov_wq;
+
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
*/
@@ -83,108 +85,35 @@ struct bnx2x_vf_queue {
u16 index;
u16 sb_idx;
bool is_leading;
+ bool sp_initialized;
};
-/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
- * q-init, q-setup and SB index
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
*/
-struct bnx2x_vfop_qctor_params {
+struct bnx2x_vf_queue_construct_params {
struct bnx2x_queue_state_params qstate;
struct bnx2x_queue_setup_params prep_qsetup;
};
-/* VFOP parameters (one copy per VF) */
-union bnx2x_vfop_params {
- struct bnx2x_vlan_mac_ramrod_params vlan_mac;
- struct bnx2x_rx_mode_ramrod_params rx_mode;
- struct bnx2x_mcast_ramrod_params mcast;
- struct bnx2x_config_rss_params rss;
- struct bnx2x_vfop_qctor_params qctor;
-};
-
/* forward */
struct bnx2x_virtf;
/* VFOP definitions */
-typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
-
-struct bnx2x_vfop_cmd {
- vfop_handler_t done;
- bool block;
-};
-/* VFOP queue filters command additional arguments */
-struct bnx2x_vfop_filter {
- struct list_head link;
+struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VFOP_FILTER_MAC 1
-#define BNX2X_VFOP_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC 1
+#define BNX2X_VF_FILTER_VLAN 2
bool add;
u8 *mac;
u16 vid;
};
-struct bnx2x_vfop_filters {
- int add_cnt;
- struct list_head head;
- struct bnx2x_vfop_filter filters[];
-};
-
-/* transient list allocated, built and saved until its
- * passed to the SP-VERBs layer.
- */
-struct bnx2x_vfop_args_mcast {
- int mc_num;
- struct bnx2x_mcast_list_elem *mc;
-};
-
-struct bnx2x_vfop_args_qctor {
- int qid;
- u16 sb_idx;
-};
-
-struct bnx2x_vfop_args_qdtor {
- int qid;
- struct eth_context *cxt;
-};
-
-struct bnx2x_vfop_args_defvlan {
- int qid;
- bool enable;
- u16 vid;
- u8 prio;
-};
-
-struct bnx2x_vfop_args_qx {
- int qid;
- bool en_add;
-};
-
-struct bnx2x_vfop_args_filters {
- struct bnx2x_vfop_filters *multi_filter;
- atomic_t *credit; /* non NULL means 'don't consume credit' */
-};
-
-union bnx2x_vfop_args {
- struct bnx2x_vfop_args_mcast mc_list;
- struct bnx2x_vfop_args_qctor qctor;
- struct bnx2x_vfop_args_qdtor qdtor;
- struct bnx2x_vfop_args_defvlan defvlan;
- struct bnx2x_vfop_args_qx qx;
- struct bnx2x_vfop_args_filters filters;
-};
-
-struct bnx2x_vfop {
- struct list_head link;
- int rc; /* return code */
- int state; /* next state */
- union bnx2x_vfop_args args; /* extra arguments */
- union bnx2x_vfop_params *op_p; /* ramrod params */
-
- /* state machine callbacks */
- vfop_handler_t transition;
- vfop_handler_t done;
+struct bnx2x_vf_mac_vlan_filters {
+ int count;
+ struct bnx2x_vf_mac_vlan_filter filters[];
};
/* vf context */
@@ -204,15 +133,7 @@ struct bnx2x_virtf {
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
- /* non 0 during flr cleanup */
- u8 flr_clnup_stage;
-#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
- * sans the end-wait
- */
-#define VF_FLR_ACK 2 /* ACK flr notification */
-#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
- * ~ final cleanup' end wait
- */
+ bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
@@ -276,11 +197,6 @@ struct bnx2x_virtf {
struct bnx2x_rss_config_obj rss_conf_obj;
/* slow-path operations */
- atomic_t op_in_progress;
- int op_rc;
- bool op_wait_blocking;
- struct list_head op_list_head;
- union bnx2x_vfop_params op_params;
struct mutex op_mutex; /* one vfop at a time mutex */
enum channel_tlvs op_current;
};
@@ -338,11 +254,6 @@ struct bnx2x_vf_mbx {
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
-
- u8 flags;
-#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
- * more then one pending msg
- */
};
struct bnx2x_vf_sp {
@@ -419,6 +330,10 @@ struct bnx2x_vfdb {
/* the number of msix vectors belonging to this PF designated for VFs */
u16 vf_sbs_pool;
u16 first_vf_igu_entry;
+
+ /* sp_rtnl synchronization */
+ struct mutex event_mutex;
+ u64 event_occur;
};
/* queue access */
@@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj);
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
-void bnx2x_iov_sp_task(struct bnx2x *bp);
/* global vf mailbox routines */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event);
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
/* CORE VF API */
@@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
dma_addr_t *sb_map);
-/* VFOP generic helpers */
-#define bnx2x_vfop_default(state) do { \
- BNX2X_ERR("Bad state %d\n", (state)); \
- vfop->rc = -EINVAL; \
- goto op_err; \
- } while (0)
-
-enum {
- VFOP_DONE,
- VFOP_CONT,
- VFOP_VERIFY_PEND,
-};
-
-#define bnx2x_vfop_finalize(vf, rc, next) do { \
- if ((rc) < 0) \
- goto op_err; \
- else if ((rc) > 0) \
- goto op_pending; \
- else if ((next) == VFOP_DONE) \
- goto op_done; \
- else if ((next) == VFOP_VERIFY_PEND) \
- BNX2X_ERR("expected pending\n"); \
- else { \
- DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
- atomic_set(&vf->op_in_progress, 1); \
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
- return; \
- } \
- } while (0)
-
-#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
- do { \
- vfop->state = first_state; \
- vfop->op_p = &vf->op_params; \
- vfop->transition = trans_hndlr; \
- vfop->done = done_hndlr; \
- } while (0)
-
-static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- WARN_ON(list_empty(&vf->op_list_head));
- return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
-}
-
-static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
-
- WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
- if (vfop) {
- INIT_LIST_HEAD(&vfop->link);
- list_add(&vfop->link, &vf->op_list_head);
- }
- return vfop;
-}
-
-static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vfop *vfop)
-{
- /* rc < 0 - error, otherwise set to 0 */
- DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
- if (vfop->rc >= 0)
- vfop->rc = 0;
- DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
-
- /* unlink the current op context and propagate error code
- * must be done before invoking the 'done()' handler
- */
- WARN(!mutex_is_locked(&vf->op_mutex),
- "about to access vf op linked list but mutex was not locked!");
- list_del(&vfop->link);
-
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
- vf->op_rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- } else {
- struct bnx2x_vfop *cur_vfop;
-
- DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
- cur_vfop = bnx2x_vfop_cur(bp, vf);
- cur_vfop->rc = vfop->rc;
- DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
- }
-
- /* invoke done handler */
- if (vfop->done) {
- DP(BNX2X_MSG_IOV, "calling done handler\n");
- vfop->done(bp, vf);
- } else {
- /* there is no done handler for the operation to unlock
- * the mutex. Must have gotten here from PF initiated VF RELEASE
- */
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
- }
-
- DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
- vf->op_rc, vfop->rc);
-
- /* if this is the last nested op reset the wait_blocking flag
- * to release any blocking wrappers, only after 'done()' is invoked
- */
- if (list_empty(&vf->op_list_head)) {
- DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
- vf->op_wait_blocking = false;
- }
-
- kfree(vfop);
-}
-
-static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
-{
- /* can take a while if any port is running */
- int cnt = 5000;
-
- might_sleep();
- while (cnt--) {
- if (vf->op_wait_blocking == false) {
-#ifdef BNX2X_STOP_ON_ERROR
- DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
-#endif
- return 0;
- }
- usleep_range(1000, 2000);
-
- if (bp->panic)
- return -EIO;
- }
-
- /* timeout! */
-#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
-#endif
-
- return -EBUSY;
-}
-
-static inline int bnx2x_vfop_transition(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- vfop_handler_t transition,
- bool block)
-{
- if (block)
- vf->op_wait_blocking = true;
- transition(bp, vf);
- if (block)
- return bnx2x_vfop_wait_blocking(bp, vf);
- return 0;
-}
-
/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_params,
@@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q,
- struct bnx2x_vfop_qctor_params *p,
+ struct bnx2x_vf_queue_construct_params *p,
unsigned long q_type);
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *macs,
- int qid, bool drv_only);
-
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- struct bnx2x_vfop_filters *vlans,
- int qid, bool drv_only);
-
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid);
-
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- bnx2x_mac_addr_t *mcasts,
- int mcast_num, bool drv_only);
-
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd,
- int qid, unsigned long accept_flags);
-
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
-
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mac_vlan_filters *filters,
+ int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+ struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vfpf_tpa_tlv *tlv,
+ struct bnx2x_queue_update_tpa_params *params);
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
*/
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
@@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
void bnx2x_iov_channel_down(struct bnx2x *bp);
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
#else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
-static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
- bool queue_work) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
union event_ring_elem *elem) {return 1; }
-static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
-static inline void bnx2x_vf_mbx(struct bnx2x *bp,
- struct vf_pf_event_data *vfpf_event) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a2a5a9..0622884596b2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->leading_rss = cl_id;
q->is_leading = true;
+ q->sp_initialized = true;
}
/* ask the pf to open a queue for the vf */
@@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
- switch (mode) {
- case BNX2X_RX_MODE_NONE: /* no Rx */
+ /* Ignore everything except MODE_NONE */
+ if (mode == BNX2X_RX_MODE_NONE) {
req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
- break;
- case BNX2X_RX_MODE_NORMAL:
+ } else {
+ /* Current PF driver will not look at the specific flags,
+ * but they are required when working with older drivers on hv.
+ */
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_PROMISC:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- default:
- BNX2X_ERR("BAD rx mode (%d)\n", mode);
- rc = -EINVAL;
- goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
rc = -EINVAL;
}
-out:
+
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
@@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
}
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
+ struct bnx2x_virtf *vf,
+ int vf_rc)
{
struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
- resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
/* send response */
vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
mmiowb();
- /* initiate dmae to send the response */
- mbx->flags &= ~VF_MSG_INPROCESS;
-
/* copy the response header including status-done field,
* must be last dmae, must be after FW is acked
*/
@@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
return;
mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
+ bnx2x_vf_release(bp, vf);
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
- struct bnx2x_virtf *vf)
+ struct bnx2x_virtf *vf,
+ int rc)
{
bnx2x_vf_mbx_resp_single_tlv(bp, vf);
- bnx2x_vf_mbx_resp_send_msg(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}
static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
@@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
- /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
sizeof(struct channel_list_end_tlv));
/* send the response */
- vf->op_rc = vfop_status;
- bnx2x_vf_mbx_resp_send_msg(bp, vf);
+ bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_init_tlv *init = &mbx->msg->req.init;
+ int rc;
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
- vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
/* set VF multiqueue statistics collection mode */
if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
vf->cfg_flags |= VF_CFG_STATS_COALESCE;
/* response */
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* convert MBX queue-flags to standard SP queue-flags */
@@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ struct bnx2x_vf_queue_construct_params qctor;
+ int rc = 0;
/* verify vf_qid */
if (setup_q->vf_qid >= vf_rxq_count(vf)) {
BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
setup_q->vf_qid, vf_rxq_count(vf));
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto response;
}
@@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_leading_vfq_init(bp, vf, q);
/* re-init the VF operation context */
- memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
- setup_p = &vf->op_params.qctor.prep_qsetup;
- init_p = &vf->op_params.qctor.qstate.params.init;
+ memset(&qctor, 0,
+ sizeof(struct bnx2x_vf_queue_construct_params));
+ setup_p = &qctor.prep_qsetup;
+ init_p = &qctor.qstate.params.init;
/* activate immediately */
__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
q->index, q->sb_idx);
}
/* complete the preparations */
- bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+ bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
- vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
- if (vf->op_rc)
+ rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+ if (rc)
goto response;
- return;
}
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
-enum bnx2x_vfop_filters_state {
- BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
- BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
- BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
- BNX2X_VFOP_MBX_Q_FILTERS_DONE
-};
-
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct vfpf_set_q_filters_tlv *tlv,
- struct bnx2x_vfop_filters **pfl,
+ struct bnx2x_vf_mac_vlan_filters **pfl,
u32 type_flag)
{
int i, j;
- struct bnx2x_vfop_filters *fl = NULL;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
size_t fsz;
- fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
- sizeof(struct bnx2x_vfop_filters);
+ fsz = tlv->n_mac_vlan_filters *
+ sizeof(struct bnx2x_vf_mac_vlan_filter) +
+ sizeof(struct bnx2x_vf_mac_vlan_filters);
fl = kzalloc(fsz, GFP_KERNEL);
if (!fl)
return -ENOMEM;
- INIT_LIST_HEAD(&fl->head);
-
for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
@@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ fl->filters[j].type = BNX2X_VF_FILTER_MAC;
} else {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
}
fl->filters[j].add =
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
- list_add_tail(&fl->filters[j++].link, &fl->head);
+ fl->count++;
}
- if (list_empty(&fl->head))
+ if (!fl->count)
kfree(fl);
else
*pfl = fl;
@@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
-static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
- int rc;
+ int rc = 0;
struct vfpf_set_q_filters_tlv *msg =
&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
- struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
- enum bnx2x_vfop_filters_state state = vfop->state;
-
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vfop_mbx_qfilters,
- .block = false,
- };
-
- DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
-
- if (vfop->rc < 0)
- goto op_err;
+ /* check for any mac/vlan changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
- switch (state) {
- case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (rc)
+ goto op_err;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
- struct bnx2x_vfop_filters *fl = NULL;
+ if (fl) {
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
- if (vfop->rc)
+ /* set mac list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set mac list */
- rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
- case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+ /* build vlan list */
+ fl = NULL;
- /* check for any vlan/mac changes */
- if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build vlan list */
- struct bnx2x_vfop_filters *fl = NULL;
-
- vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
- if (vfop->rc)
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
goto op_err;
-
- if (fl) {
- /* set vlan list */
- rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
- msg->vf_qid,
- false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
- unsigned long accept = 0;
- struct pf_vf_bulletin_content *bulletin =
- BP_VF_BULLETIN(bp, vf->index);
-
- /* covert VF-PF if mask to bnx2x accept flags */
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
- __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
-
- if (msg->rx_mask &
- VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
- __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
-
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
-
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+ }
- if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
- __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
- /* A packet arriving the vf's mac should be accepted
- * with any vlan, unless a vlan has already been
- * configured.
- */
- if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
-
- /* set rx-mode */
- rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
- msg->vf_qid, accept);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
+ /* Ignore VF requested mode; instead set a regular mode */
+ if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
- /* fall through */
-
- case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
- /* next state */
- vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
-
- if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
- /* set mcasts */
- rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
- msg->n_multicast, false);
- if (rc) {
- vfop->rc = rc;
- goto op_err;
- }
- return;
- }
- /* fall through */
-op_done:
- case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
- bnx2x_vfop_end(bp, vf, vfop);
- return;
-op_err:
- BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
- vf->abs_vfid, msg->vf_qid, vfop->rc);
- goto op_done;
- default:
- bnx2x_vfop_default(state);
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan, unless a vlan has already been
+ * configured.
+ */
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+ if (rc)
+ goto op_err;
}
-}
-static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vfop_cmd *cmd)
-{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- if (vfop) {
- bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
- bnx2x_vfop_mbx_qfilters, cmd->done);
- return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
- cmd->block);
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+ msg->n_multicast, false);
+ if (rc)
+ goto op_err;
}
- return -ENOMEM;
+op_err:
+ if (rc)
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, rc);
+ return rc;
}
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
{
- struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc = 0;
/* if a mac was already set for this VF via the set vf mac ndo, we only
* accept mac configurations of that mac. Why accept them at all?
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
if (filters->n_mac_vlan_filters > 1) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
@@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
int i;
@@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
VFPF_Q_FILTER_VLAN_TAG_VALID) {
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
- vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
}
/* verify vf_qid */
- if (filters->vf_qid > vf_rxq_count(vf))
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ int rc;
+
+ rc = bnx2x_filters_validate_mac(bp, vf, filters);
+ if (rc)
+ goto response;
+
+ rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+ if (rc)
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
/* print q_filter message */
bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
- vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- goto response;
- return;
-
+ rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
int qid = mbx->msg->req.q_op.vf_qid;
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
vf->abs_vfid, qid);
- vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_close(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc;
DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
- vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ rc = bnx2x_vf_free(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
- struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct bnx2x_config_rss_params rss;
struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+ int rc = 0;
if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
vf->index);
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto mbx_resp;
}
+ memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
/* set vfop params according to rss tlv */
- memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ memcpy(rss.ind_table, rss_tlv->ind_table,
T_ETH_INDIRECTION_TABLE_SIZE);
- memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
- sizeof(rss_tlv->rss_key));
- vf_op_params->rss_obj = &vf->rss_conf_obj;
- vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+ memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+ rss.rss_obj = &vf->rss_conf_obj;
+ rss.rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
- vf_op_params->rss_flags = 0;
- vf_op_params->ramrod_flags = 0;
+ rss.rss_flags = 0;
+ rss.ramrod_flags = 0;
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
- __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
- __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
- __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
- __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
- __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
- __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
- __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
- __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
- __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
BNX2X_ERR("about to hit a FW assert. aborting...\n");
- vf->op_rc = -EINVAL;
+ rc = -EINVAL;
goto mbx_resp;
}
- vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+ rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+ bnx2x_vf_mbx_resp(bp, vf, rc);
+}
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_queue_update_tpa_params vf_op_params;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+ int rc = 0;
+
+ memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params.complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params.dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params.max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params.max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params.max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params.sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params.sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params.sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params.tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params.update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params.update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
mbx_resp:
- if (vf->op_rc)
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, rc);
}
/* dispatch request */
@@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx);
return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
}
} else {
@@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* can we respond to VF (do we have an address for it?) */
if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
- /* mbx_resp uses the op_rc of the VF */
- vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
-
/* notify the VF that we do not support this request */
- bnx2x_vf_mbx_resp(bp, vf);
+ bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
} else {
/* can't send a response since this VF is unknown to us
* just ack the FW to release the mailbox and unlock
@@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
}
-/* handle new vf-pf message */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event)
{
- struct bnx2x_virtf *vf;
- struct bnx2x_vf_mbx *mbx;
u8 vf_idx;
- int rc;
DP(BNX2X_MSG_IOV,
"vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
BNX2X_NR_VIRTFN(bp)) {
BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
- goto mbx_done;
+ return;
}
+
vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
- mbx = BP_VF_MBX(bp, vf_idx);
- /* verify an event is not currently being processed -
- * debug failsafe only
- */
- if (mbx->flags & VF_MSG_INPROCESS) {
- BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
- vfpf_event->vf_id);
- goto mbx_done;
- }
- vf = BP_VF(bp, vf_idx);
+ /* Update VFDB with current message and schedule its handling */
+ mutex_lock(&BP_VFDB(bp)->event_mutex);
+ BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+ BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+ BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+ mutex_unlock(&BP_VFDB(bp)->event_mutex);
- /* save the VF message address */
- mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
- mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
- DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
- mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+ bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
- /* dmae to get the VF request */
- rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
- mbx->vf_addr_hi, mbx->vf_addr_lo,
- sizeof(union vfpf_tlvs)/4);
- if (rc) {
- BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
- goto mbx_error;
- }
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+ struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+ u64 events;
+ u8 vf_idx;
+ int rc;
- /* process the VF message header */
- mbx->first_tlv = mbx->msg->req.first_tlv;
+ if (!vfdb)
+ return;
- /* Clean response buffer to refrain from falsely seeing chains */
- memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+ mutex_lock(&vfdb->event_mutex);
+ events = vfdb->event_occur;
+ vfdb->event_occur = 0;
+ mutex_unlock(&vfdb->event_mutex);
- /* dispatch the request (will prepare the response) */
- bnx2x_vf_mbx_request(bp, vf, mbx);
- goto mbx_done;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
-mbx_error:
- bnx2x_vf_release(bp, vf, false); /* non blocking */
-mbx_done:
- return;
+ /* Handle VFs which have pending events */
+ if (!(events & (1ULL << vf_idx)))
+ continue;
+
+ DP(BNX2X_MSG_IOV,
+ "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+ vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+ mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+ vf->abs_vfid, mbx->vf_addr_hi,
+ mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n",
+ vf->abs_vfid);
+ bnx2x_vf_release(bp, vf);
+ return;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* Clean the response buffer to avoid falsely
+ * seeing chains.
+ */
+ memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ }
}
/* propagate local bulletin board to vf */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */
};
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -405,6 +426,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 000000000000..31f55a90a197
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..adf8acbddf56
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2584 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY 0
+
+#define GENET_DEFAULT_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+
+#define RX_BUF_LENGTH 2048
+#define SKB_ALIGNMENT 32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d, u32 value)
+{
+ __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+ __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to the GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, so skip these expensive writes unless
+ * the platform is explicitly configured for 64-bit/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+ void __iomem *d, dma_addr_t addr, u32 val)
+{
+ dmadesc_set_length_status(priv, d, val);
+ dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ dma_addr_t addr;
+
+ addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+ /* Register accesses to the GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, so skip this expensive read unless
+ * the platform is explicitly configured for 64-bit/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+ return addr;
+}
+
+#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+ else
+ return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+ else
+ bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These accessors are defined to deal with the register map change
+ * between GENET1.1 and GENET2. Only those currently used
+ * by the driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+ DMA_RING_CFG = 0,
+ DMA_CTRL,
+ DMA_STATUS,
+ DMA_SCB_BURST_SIZE,
+ DMA_ARB_CTRL,
+ DMA_PRIORITY,
+ DMA_RING_PRIORITY,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x2C,
+ [DMA_PRIORITY] = 0x30,
+ [DMA_RING_PRIORITY] = 0x38,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+ [DMA_CTRL] = 0x00,
+ [DMA_STATUS] = 0x04,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
+
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+ return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/D the registers
+ * having different meaning depending on the direction
+ */
+enum dma_ring_reg {
+ TDMA_READ_PTR = 0,
+ RDMA_WRITE_PTR = TDMA_READ_PTR,
+ TDMA_READ_PTR_HI,
+ RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+ TDMA_CONS_INDEX,
+ RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+ TDMA_PROD_INDEX,
+ RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+ DMA_RING_BUF_SIZE,
+ DMA_START_ADDR,
+ DMA_START_ADDR_HI,
+ DMA_END_ADDR,
+ DMA_END_ADDR_HI,
+ DMA_MBUF_DONE_THRESH,
+ TDMA_FLOW_PERIOD,
+ RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+ TDMA_WRITE_PTR,
+ RDMA_READ_PTR = TDMA_WRITE_PTR,
+ TDMA_WRITE_PTR_HI,
+ RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing;
+ * for obvious reasons the LO and HI word parts
+ * are contiguous, but this offsets the other
+ * registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_READ_PTR_HI] = 0x04,
+ [TDMA_CONS_INDEX] = 0x08,
+ [TDMA_PROD_INDEX] = 0x0C,
+ [DMA_RING_BUF_SIZE] = 0x10,
+ [DMA_START_ADDR] = 0x14,
+ [DMA_START_ADDR_HI] = 0x18,
+ [DMA_END_ADDR] = 0x1C,
+ [DMA_END_ADDR_HI] = 0x20,
+ [DMA_MBUF_DONE_THRESH] = 0x24,
+ [TDMA_FLOW_PERIOD] = 0x28,
+ [TDMA_WRITE_PTR] = 0x2C,
+ [TDMA_WRITE_PTR_HI] = 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_CONS_INDEX] = 0x04,
+ [TDMA_PROD_INDEX] = 0x08,
+ [DMA_RING_BUF_SIZE] = 0x0C,
+ [DMA_START_ADDR] = 0x10,
+ [DMA_END_ADDR] = 0x14,
+ [DMA_MBUF_DONE_THRESH] = 0x18,
+ [TDMA_FLOW_PERIOD] = 0x1C,
+ [TDMA_WRITE_PTR] = 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 rbuf_chk_ctrl;
+ bool rx_csum_en;
+
+ rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+ rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+ /* enable rx checksumming */
+ if (rx_csum_en)
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ else
+ rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+ priv->desc_rxchk_en = rx_csum_en;
+
+ /* If UniMAC forwards the CRC, we need to skip over it so that
+ * a valid CHK bit is set in the per-packet status word
+ */
+ if (rx_csum_en && priv->crc_fwd_en)
+ rbuf_chk_ctrl |= RBUF_SKIP_FCS;
+ else
+ rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
+
+ bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ bool desc_64b_en;
+ u32 tbuf_ctrl, rbuf_ctrl;
+
+ tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+ rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+ desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+ /* enable 64-byte descriptors in both directions (RBUF and TBUF) */
+ if (desc_64b_en) {
+ tbuf_ctrl |= RBUF_64B_EN;
+ rbuf_ctrl |= RBUF_64B_EN;
+ } else {
+ tbuf_ctrl &= ~RBUF_64B_EN;
+ rbuf_ctrl &= ~RBUF_64B_EN;
+ }
+ priv->desc_64b_en = desc_64b_en;
+
+ bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+ bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcmgenet_set_tx_csum(dev, wanted);
+ if (changed & (NETIF_F_RXCSUM))
+ ret = bcmgenet_set_rx_csum(dev, wanted);
+
+ return ret;
+}
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = level;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+ BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_MIB_RX,
+ BCMGENET_STAT_MIB_TX,
+ BCMGENET_STAT_RUNT,
+ BCMGENET_STAT_MISC,
+};
+
+struct bcmgenet_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcmgenet_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCMGENET_STAT_NETDEV, \
+}
+
+#define STAT_GENET_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = BCMGENET_STAT_MISC, \
+ .reg_offset = offset, \
+}
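+
+/* For reference, a manual expansion of one of these macros (shown here for
+ * illustration only):
+ *
+ *	STAT_NETDEV(rx_packets) ==
+ *	{
+ *		.stat_string = "rx_packets",
+ *		.stat_sizeof = sizeof(((struct net_device_stats *)0)->rx_packets),
+ *		.stat_offset = offsetof(struct net_device_stats, rx_packets),
+ *		.type        = BCMGENET_STAT_NETDEV,
+ *	}
+ */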
+
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define BCMGENET_STAT_OFFSET 0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+ STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+ STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+ STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+ STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* Misc UniMAC counters */
+ STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+ UMAC_RBUF_OVFL_CNT),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+};
+
+#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strlcpy(info->version, "v2.0", sizeof(info->version));
+ info->n_stats = BCMGENET_STATS_LEN;
+
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMGENET_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmgenet_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmgenet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ switch (s->type) {
+ case BCMGENET_STAT_NETDEV:
+ continue;
+ case BCMGENET_STAT_MIB_RX:
+ case BCMGENET_STAT_MIB_TX:
+ case BCMGENET_STAT_RUNT:
+ if (s->type != BCMGENET_STAT_MIB_RX)
+ offset = BCMGENET_STAT_OFFSET;
+ val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
+ j + offset);
+ break;
+ case BCMGENET_STAT_MISC:
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+}
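+
+/* A short trace of the loop above, describing the code as written: NETDEV
+ * stats are skipped entirely, every other entry advances j by its
+ * stat_sizeof, RX MIB counters are read at UMAC_MIB_START + j, TX and RUNT
+ * counters at UMAC_MIB_START + j + BCMGENET_STAT_OFFSET, and MISC counters
+ * come from their own reg_offset (and are cleared once they read back as
+ * ~0, i.e. overflowed).
+ */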
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcmgenet_update_mib_counters(priv);
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ if (s->type == BCMGENET_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+/* standard ethtool support functions. */
+static struct ethtool_ops bcmgenet_ethtool_ops = {
+ .get_strings = bcmgenet_get_strings,
+ .get_sset_count = bcmgenet_get_sset_count,
+ .get_ethtool_stats = bcmgenet_get_ethtool_stats,
+ .get_settings = bcmgenet_get_settings,
+ .set_settings = bcmgenet_set_settings,
+ .get_drvinfo = bcmgenet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = bcmgenet_get_msglevel,
+ .set_msglevel = bcmgenet_set_msglevel,
+};
+
+/* Power down the unimac, based on mode. */
+static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ switch (mode) {
+ case GENET_POWER_CABLE_SENSE:
+ phy_detach(priv->phydev);
+ break;
+
+ case GENET_POWER_PASSIVE:
+ /* Power down LED */
+ bcmgenet_mii_reset(priv->dev);
+ if (priv->hw_params->flags & GENET_HAS_EXT) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= (EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ if (!(priv->hw_params->flags & GENET_HAS_EXT))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+ switch (mode) {
+ case GENET_POWER_PASSIVE:
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_BIAS);
+ /* fallthrough */
+ case GENET_POWER_CABLE_SENSE:
+ /* enable APD */
+ reg |= EXT_PWR_DN_EN_LD;
+ break;
+ default:
+ break;
+ }
+
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(priv->dev);
+}
+
+/* ioctl handles special commands that are not present in ethtool. */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int val = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ val = -ENODEV;
+ else
+ val = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+
+ default:
+ val = -EINVAL;
+ break;
+ }
+
+ return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+ tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+ /* Advancing local write pointer */
+ if (ring->write_ptr == ring->end_ptr)
+ ring->write_ptr = ring->cb_ptr;
+ else
+ ring->write_ptr++;
+
+ return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+ priv->int1_mask &= ~(1 << ring->index);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_SET);
+ priv->int1_mask |= (1 << ring->index);
+}
+
+/* Unlocked version of the reclaim routine */
+static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int last_tx_cn, last_c_index, num_tx_bds;
+ struct enet_cb *tx_cb_ptr;
+ struct netdev_queue *txq;
+ unsigned int c_index;
+
+ /* Compute how many buffers have been transmitted since the last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ txq = netdev_get_tx_queue(dev, ring->queue);
+
+ last_c_index = ring->c_index;
+ num_tx_bds = ring->size;
+
+ c_index &= (num_tx_bds - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_bds - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, dev,
+ "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+ __func__, ring->index,
+ c_index, last_tx_cn, last_c_index);
+
+ /* Reclaim transmitted buffers */
+ while (last_tx_cn-- > 0) {
+ tx_cb_ptr = ring->cbs + last_c_index;
+ if (tx_cb_ptr->skb) {
+ dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ dma_unmap_single(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ tx_cb_ptr->skb->len,
+ DMA_TO_DEVICE);
+ bcmgenet_free_cb(tx_cb_ptr);
+ } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+ dev->stats.tx_bytes +=
+ dma_unmap_len(tx_cb_ptr, dma_len);
+ dma_unmap_page(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ dma_unmap_len(tx_cb_ptr, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ }
+ dev->stats.tx_packets++;
+ ring->free_bds += 1;
+
+ last_c_index++;
+ last_c_index &= (num_tx_bds - 1);
+ }
+
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+ ring->int_disable(priv, ring);
+
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+
+ ring->c_index = c_index;
+}
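+
+/* Worked example of the index arithmetic above (numbers picked purely for
+ * illustration): with num_tx_bds = 256, ring->c_index previously 250 and a
+ * masked hardware consumer index of 4, the wrap-around branch gives
+ * last_tx_cn = 256 - 250 + 4 = 10 buffers to reclaim; last_c_index then
+ * advances modulo 256 as each control block is freed.
+ */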
+
+static void bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcmgenet_tx_reclaim(dev, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_is_multiqueue(dev)) {
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+ }
+
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either the head of a fragmented packet or a
+ * standalone SKB); the caller must hold priv->lock
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+ struct sk_buff *skb,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int skb_len;
+ dma_addr_t mapping;
+ u32 length_status;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ tx_cb_ptr->skb = skb;
+
+ skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ DMA_TX_APPEND_CRC;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ length_status |= DMA_TX_DO_CSUM;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+ /* Decrement total BD count and advance our write pointer */
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Transmit an SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+ skb_frag_t *frag,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ dma_addr_t mapping;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+ tx_cb_ptr->skb = NULL;
+
+ mapping = skb_frag_dma_map(kdev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+ __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+ (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+{
+ struct status_64 *status = NULL;
+ struct sk_buff *new_skb;
+ u16 offset;
+ u8 ip_proto;
+ u16 ip_ver;
+ u32 tx_csum_info;
+
+ if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+ /* If the 64-byte status block is enabled, we must make sure
+ * the skb has enough headroom for us to insert it.
+ */
+ new_skb = skb_realloc_headroom(skb, sizeof(*status));
+ dev_kfree_skb(skb);
+ if (!new_skb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = new_skb;
+ }
+
+ skb_push(skb, sizeof(*status));
+ status = (struct status_64 *)skb->data;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = htons(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ offset = skb_checksum_start_offset(skb) - sizeof(*status);
+ tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+ (offset + skb->csum_offset);
+
+ /* Set the length-valid bit for TCP and UDP, and additionally
+ * the special UDP flag for UDP over IPv4; otherwise leave it 0.
+ */
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ tx_csum_info |= STATUS_TX_CSUM_LV;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+ } else
+ tx_csum_info = 0;
+
+ status->tx_csum_info = tx_csum_info;
+ }
+
+ return 0;
+}
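+
+/* Worked example of the packing above, assuming an untagged TCP/IPv4 frame
+ * with a 20-byte IP header (so checksumming starts 34 bytes into the frame
+ * and the TCP checksum field sits 16 bytes into the TCP header):
+ *
+ *	offset       = 34
+ *	tx_csum_info = (34 << STATUS_TX_CSUM_START_SHIFT) | (34 + 16)
+ *		       | STATUS_TX_CSUM_LV
+ *		     = 0x80220032
+ *
+ * i.e. checksum start offset in the upper halfword, checksum store offset
+ * in the lower halfword, plus the length-valid bit.
+ */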
+
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = NULL;
+ struct netdev_queue *txq;
+ unsigned long flags = 0;
+ int nr_frags, index;
+ u16 dma_desc_flags;
+ int ret;
+ int i;
+
+ index = skb_get_queue_mapping(skb);
+ /* Mapping strategy:
+ * queue_mapping = 0, unclassified, packet transmitted through ring 16
+ * queue_mapping = 1, goes to ring 0 (highest priority queue).
+ * queue_mapping = 2, goes to ring 1.
+ * queue_mapping = 3, goes to ring 2.
+ * queue_mapping = 4, goes to ring 3.
+ */
+ if (index == 0)
+ index = DESC_INDEX;
+ else
+ index -= 1;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ ring = &priv->tx_rings[index];
+ txq = netdev_get_tx_queue(dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->free_bds <= nr_frags + 1) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* set the SKB transmit checksum */
+ if (priv->desc_64b_en) {
+ ret = bcmgenet_put_tx_csum(dev, skb);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ dma_desc_flags = DMA_SOP;
+ if (nr_frags == 0)
+ dma_desc_flags |= DMA_EOP;
+
+ /* Transmit single SKB or head of fragment list */
+ ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* xmit fragment */
+ for (i = 0; i < nr_frags; i++) {
+ ret = bcmgenet_xmit_frag(dev,
+ &skb_shinfo(skb)->frags[i],
+ (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ skb_tx_timestamp(skb);
+
+ /* we kept a software copy of how much we should advance the TDMA
+ * producer index, now write it down to the hardware
+ */
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+ ring->int_enable(priv, ring);
+ }
+
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
+
+
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+ struct enet_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ skb = netdev_alloc_skb(priv->dev,
+ priv->rx_buf_len + SKB_ALIGNMENT);
+ if (!skb)
+ return -ENOMEM;
+
+ /* a caller did not release this control block */
+ WARN_ON(cb->skb != NULL);
+ cb->skb = skb;
+ mapping = dma_map_single(kdev, skb->data,
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcmgenet_free_cb(cb);
+ netif_err(priv, rx_err, priv->dev,
+ "%s DMA map failed\n", __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ /* assign packet, prepare descriptor, and advance pointer */
+
+ dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+ /* turn on the newly assigned BD for DMA to use */
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+
+ return 0;
+}
+
+/* bcmgenet_desc_rx - descriptor based rx process.
+ * this could be called from bottom half, or from NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+ unsigned int budget)
+{
+ struct net_device *dev = priv->dev;
+ struct enet_cb *cb;
+ struct sk_buff *skb;
+ u32 dma_length_status;
+ unsigned long dma_flag;
+ int len, err;
+ unsigned int rxpktprocessed = 0, rxpkttoprocess;
+ unsigned int p_index;
+ unsigned int chksum_ok = 0;
+
+ p_index = bcmgenet_rdma_ring_readl(priv,
+ DESC_INDEX, RDMA_PROD_INDEX);
+ p_index &= DMA_P_INDEX_MASK;
+
+ if (p_index < priv->rx_c_index)
+ rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ rxpkttoprocess = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, dev,
+ "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+ while ((rxpktprocessed < rxpkttoprocess) &&
+ (rxpktprocessed < budget)) {
+
+ /* Unmap the packet contents so that we can use the
+ * RSV from the 64-byte descriptor when enabled and save
+ * a 32-bit register read
+ */
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+
+ if (!priv->desc_64b_en) {
+ dma_length_status = dmadesc_get_length_status(priv,
+ priv->rx_bds +
+ (priv->rx_read_ptr *
+ DMA_DESC_SIZE));
+ } else {
+ struct status_64 *status;
+ status = (struct status_64 *)skb->data;
+ dma_length_status = status->length_status;
+ }
+
+ /* DMA flags and length are still valid no matter how
+ * we got the Receive Status Vector (64B RSB or register)
+ */
+ dma_flag = dma_length_status & 0xffff;
+ len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+ netif_dbg(priv, rx_status, dev,
+ "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+ __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
+ dma_length_status);
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* out of memory, just drop packets at the hardware level */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ netif_err(priv, rx_status, dev,
+ "Dropping fragmented packet!\n");
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ }
+ /* report errors */
+ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER))) {
+ netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+ (unsigned int)dma_flag);
+ if (dma_flag & DMA_RX_CRC_ERROR)
+ dev->stats.rx_crc_errors++;
+ if (dma_flag & DMA_RX_OV)
+ dev->stats.rx_over_errors++;
+ if (dma_flag & DMA_RX_NO)
+ dev->stats.rx_frame_errors++;
+ if (dma_flag & DMA_RX_LG)
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+
+ /* discard the packet and advance consumer index.*/
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ } /* error packet */
+
+ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+ priv->desc_rxchk_en;
+
+ skb_put(skb, len);
+ if (priv->desc_64b_en) {
+ skb_pull(skb, 64);
+ len -= 64;
+ }
+
+ if (likely(chksum_ok))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* remove the 2 bytes the hardware added for IP alignment */
+ skb_pull(skb, 2);
+ len -= 2;
+
+ if (priv->crc_fwd_en) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ /* Finish setting up the received SKB and send it to the kernel */
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ if (dma_flag & DMA_RX_MULT)
+ dev->stats.multicast++;
+
+ /* Notify kernel */
+ napi_gro_receive(&priv->napi, skb);
+ cb->skb = NULL;
+ netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+ /* refill RX path on the current control block */
+refill:
+ err = bcmgenet_rx_refill(priv, cb);
+ if (err)
+ netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
+
+ return rxpktprocessed;
+}
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int ret = 0;
+ int i;
+
+ netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+
+ /* loop here for each buffer needing assign */
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ /* set the DMA descriptor length once and for all
+ * it will only change if we support dynamically sizing
+ * priv->rx_buf_len, but we do not
+ */
+ dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
+ priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
+
+ ret = bcmgenet_rx_refill(priv, cb);
+ if (ret)
+ break;
+
+ }
+
+ return ret;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(&priv->dev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ if (cb->skb)
+ bcmgenet_free_cb(cb);
+ }
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int timeout = 0;
+ u32 reg;
+
+ /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+ bcmgenet_rbuf_ctrl_set(priv, 0);
+ udelay(10);
+
+ /* disable MAC while updating its registers */
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+ /* issue soft reset, wait for it to complete */
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ return 0;
+
+ udelay(1);
+ }
+
+ if (timeout == 1000) {
+ dev_err(kdev,
+ "timeout waiting for MAC to come out of reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int ret;
+ u32 reg, cpu_mask_clear;
+
+ dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+ ret = reset_umac(priv);
+ if (ret)
+ return ret;
+
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ /* clear tx/rx counter */
+ bcmgenet_umac_writel(priv,
+ MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+ bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+ bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* init rx registers, enable ip header optimization */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+ reg |= RBUF_ALIGN_2B;
+ bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+ bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+ /* Mask all interrupts.*/
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+
+ dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+ /* Monitor cable plug/unplug events for internal PHY */
+ if (phy_is_internal(priv->phydev))
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->ext_phy)
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ reg = bcmgenet_bp_mc_get(priv);
+ reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+ /* bp_mask: back pressure mask */
+ if (netif_is_multiqueue(priv->dev))
+ reg |= priv->hw_params->bp_in_mask;
+ else
+ reg &= ~priv->hw_params->bp_in_mask;
+ bcmgenet_bp_mc_set(priv, reg);
+ }
+
+ /* Enable MDIO interrupts on GENET v3+ */
+ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+ cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
+ INTRL2_CPU_MASK_CLEAR);
+
+ /* Enable rx/tx engine.*/
+ dev_dbg(kdev, "done init umac\n");
+
+ return 0;
+}
+
+/* Initialize all house-keeping variables for a TX ring, along
+ * with corresponding hardware registers
+ */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size,
+ unsigned int write_ptr, unsigned int end_ptr)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ u32 flow_period_val = 0;
+ unsigned int first_bd;
+
+ spin_lock_init(&ring->lock);
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->queue = 0;
+ ring->int_enable = bcmgenet_tx_ring16_int_enable;
+ ring->int_disable = bcmgenet_tx_ring16_int_disable;
+ } else {
+ ring->queue = index + 1;
+ ring->int_enable = bcmgenet_tx_ring_int_enable;
+ ring->int_disable = bcmgenet_tx_ring_int_disable;
+ }
+ ring->cbs = priv->tx_cbs + write_ptr;
+ ring->size = size;
+ ring->c_index = 0;
+ ring->free_bds = size;
+ ring->write_ptr = write_ptr;
+ ring->cb_ptr = write_ptr;
+ ring->end_ptr = end_ptr - 1;
+ ring->prod_index = 0;
+
+ /* Set flow period for ring != 16 */
+ if (index != DESC_INDEX)
+ flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+ /* Disable rate control for now */
+ bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+ TDMA_FLOW_PERIOD);
+ /* Unclassified traffic goes to ring 16 */
+ bcmgenet_tdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+
+ first_bd = write_ptr;
+
+ /* Set start and end address, read and write pointers */
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ DMA_START_ADDR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ TDMA_READ_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd,
+ TDMA_WRITE_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
+}
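+
+/* Worked example of the address setup above, using the GENET_V2/V3 numbers
+ * from bcmgenet_hw_params[] (bds_cnt = 32, words_per_bd = 2): ring 0 is
+ * created with write_ptr = 0 and end_ptr = 32, so its DMA_START_ADDR is 0
+ * and DMA_END_ADDR is 32 * 2 - 1 = 63 descriptor words, while ring 1
+ * (write_ptr = 32, end_ptr = 64) spans words 64 through 127.
+ */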
+
+/* Initialize a RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size)
+{
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ int ret;
+
+ priv->num_rx_bds = TOTAL_DESC;
+ priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->rx_cbs)
+ return -ENOMEM;
+
+ ret = bcmgenet_alloc_rx_buffers(priv);
+ if (ret) {
+ kfree(priv->rx_cbs);
+ return ret;
+ }
+
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+ bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ words_per_bd * size - 1, DMA_END_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
+ DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+ return ret;
+}
+
+/* init multi xmit queues, only available for GENET2+
+ * the queue is partitioned as follows:
+ *
+ * queues 0 - 3 are priority based, each one has 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
+ * descriptors: 256 - (number of tx queues * bds per queue) = 128
+ * descriptors.
+ *
+ * The transmit control block pool is then partitioned as follows:
+ * - tx queue 0 uses tx_cbs[0..31]
+ * - tx queue 1 uses tx_cbs[32..63]
+ * - tx queue 2 uses tx_cbs[64..95]
+ * - tx queue 3 uses tx_cbs[96..127]
+ * - tx queue 16 uses tx_cbs[128..255]
+ */
+static void bcmgenet_init_multiq(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i, dma_enable;
+ u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+
+ if (!netif_is_multiqueue(dev)) {
+ netdev_warn(dev, "called with non multi queue aware HW\n");
+ return;
+ }
+
+ dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ dma_enable = dma_ctrl & DMA_EN;
+ dma_ctrl &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ /* Enable strict priority arbiter mode */
+ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ /* each priority queue takes the next bds_cnt tx_cbs from
+ * the front of the pool; the remainder is left for the
+ * default tx queue (ring 16)
+ */
+ bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
+ i * priv->hw_params->bds_cnt,
+ (i + 1) * priv->hw_params->bds_cnt);
+
+ /* Configure ring as descriptor ring and set up priority */
+ ring_cfg |= 1 << i;
+ dma_priority |= ((GENET_Q0_PRIORITY + i) <<
+ (GENET_MAX_MQ_CNT + 1) * i);
+ dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+ }
+
+ /* Enable rings */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
+ reg |= ring_cfg;
+ bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+
+ /* Use configured rings priority and set ring #16 priority */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
+ reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
+ reg |= dma_priority;
+ bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
+
+ /* Configure ring as descriptor ring and re-enable DMA if enabled */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ if (dma_enable)
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
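+
+/* Reading the priority math above (GENET_Q0_PRIORITY and GENET_MAX_MQ_CNT
+ * are defined in the header; the field width used here is inferred, not
+ * quoted): each ring's priority occupies a (GENET_MAX_MQ_CNT + 1)-bit field
+ * starting at bit (GENET_MAX_MQ_CNT + 1) * i.  With four 5-bit fields that
+ * places ring 16's priority at bit 20, which matches the explicit "<< 20"
+ * used when the default ring's priority is merged in.
+ */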
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ /* disable DMA */
+ bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+
+ for (i = 0; i < priv->num_tx_bds; i++) {
+ if (priv->tx_cbs[i].skb != NULL) {
+ dev_kfree_skb(priv->tx_cbs[i].skb);
+ priv->tx_cbs[i].skb = NULL;
+ }
+ }
+
+ bcmgenet_free_rx_buffers(priv);
+ kfree(priv->rx_cbs);
+ kfree(priv->tx_cbs);
+}
+
+/* init_edma: Initialize DMA control register */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+
+ /* by default, enable ring 16 (descriptor based) */
+ ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+ if (ret) {
+ netdev_err(priv->dev, "failed to initialize RX ring\n");
+ return ret;
+ }
+
+ /* init rDma */
+ bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Init tDma */
+ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Initialize common TX ring structures */
+ priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+ priv->num_tx_bds = TOTAL_DESC;
+ priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->tx_cbs) {
+ bcmgenet_fini_dma(priv);
+ return -ENOMEM;
+ }
+
+ /* initialize multi xmit queue */
+ bcmgenet_init_multiq(priv->dev);
+
+ /* initialize special ring 16 */
+ bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+ priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+ TOTAL_DESC);
+
+ return 0;
+}
+
+/* NAPI polling method */
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_priv *priv = container_of(napi,
+ struct bcmgenet_priv, napi);
+ unsigned int work_done;
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+ work_done = bcmgenet_desc_rx(priv, budget);
+
+ /* Advancing our consumer index */
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= DMA_C_INDEX_MASK;
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ priv->rx_c_index, RDMA_CONS_INDEX);
+ if (work_done < budget) {
+ napi_complete(napi);
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+ }
+
+ return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+ struct bcmgenet_priv *priv = container_of(
+ work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+ netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+ /* Link UP/DOWN event */
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ phy_mac_interrupt(priv->phydev,
+ priv->irq0_stat & UMAC_IRQ_LINK_UP);
+ priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+ }
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+ unsigned int index;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq1_stat =
+ bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ ~priv->int1_mask;
+ /* clear interrupts */
+ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ /* Check the MBDONE interrupts.
+ * packet is done, reclaim descriptors
+ */
+ if (priv->irq1_stat & 0x0000ffff) {
+ index = 0;
+ for (index = 0; index < 16; index++) {
+ if (priv->irq1_stat & (1 << index))
+ bcmgenet_tx_reclaim(priv->dev,
+ &priv->tx_rings[index]);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq0_stat =
+ bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ /* clear interrupts */
+ bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "IRQ=0x%x\n", priv->irq0_stat);
+
+ if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+ /* We use NAPI (software interrupt throttling) if
+ * Rx Descriptor throttling is not used.
+ * Disable the interrupt; it will be re-enabled in the poll method.
+ */
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (priv->irq0_stat &
+ (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+ /* Tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ }
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_UP |
+ UMAC_IRQ_LINK_DOWN |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM |
+ UMAC_IRQ_MPD_R)) {
+ /* all other interrupts of interest are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ wake_up(&priv->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ reg |= BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+
+ reg &= ~BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
+ bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
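+
+/* Example of the register packing above (address chosen for illustration):
+ * for 00:10:18:aa:bb:cc the two writes become UMAC_MAC0 = 0x001018aa and
+ * UMAC_MAC1 = 0x0000bbcc, i.e. the first four octets packed MSB-first into
+ * MAC0 and the last two into MAC1.
+ */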
+
+static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ clk_disable(priv->clk_wol);
+ /* init umac registers to synchronize s/w with h/w */
+ ret = init_umac(priv);
+ if (ret)
+ return ret;
+
+ phy_init_hw(priv->phydev);
+ /* Speed settings must be restored */
+ bcmgenet_mii_config(priv->dev);
+
+ return 0;
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+ u32 dma_ctrl;
+
+ /* disable DMA */
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ return dma_ctrl;
+}
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+ u32 reg;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 reg;
+ int ret;
+
+ netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+ /* Turn on the clock */
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
+
+ ret = init_umac(priv);
+ if (ret)
+ goto err_clk_disable;
+
+ /* disable ethernet MAC while updating its registers */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~(CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+ if (priv->wol_enabled) {
+ ret = bcmgenet_wol_resume(priv);
+ if (ret)
+ return ret;
+ }
+
+ if (phy_is_internal(priv->phydev)) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_ENERGY_DET_MASK;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+
+ /* Disable RX/TX DMA and flush TX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize DMA\n");
+ goto err_fini_dma;
+ }
+
+ /* Always enable ring 16 - descriptor ring */
+ bcmgenet_enable_dma(priv, dma_ctrl);
+
+ ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+ goto err_fini_dma;
+ }
+
+ ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+ goto err_irq0;
+ }
+
+ /* Start the network engine */
+ napi_enable(&priv->napi);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg |= (CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ device_set_wakeup_capable(&dev->dev, 1);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ netif_tx_start_all_queues(dev);
+
+ phy_start(priv->phydev);
+
+ return 0;
+
+err_irq0:
+ free_irq(priv->irq0, dev);
+err_fini_dma:
+ bcmgenet_fini_dma(priv);
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop adding more frames to the TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+ u32 reg;
+
+ netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+ phy_stop(priv->phydev);
+
+ /* Disable MAC receive */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ netif_tx_stop_all_queues(dev);
+
+ ret = bcmgenet_dma_teardown(priv);
+ if (ret)
+ return ret;
+
+ /* Disable MAC transmit. TX DMA must be disabled before this */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ napi_disable(&priv->napi);
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_dma(priv);
+
+ free_irq(priv->irq0, priv);
+ free_irq(priv->irq1, priv);
+
+ /* Wait for pending work items to complete - we are stopping
+ * the clock now. Since interrupts are disabled, no new work
+ * will be scheduled.
+ */
+ cancel_work_sync(&priv->bcmgenet_irq_work);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+ if (priv->wol_enabled)
+ clk_enable(priv->clk_wol);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+ dev->trans_start = jiffies;
+
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT 16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr,
+ int *i,
+ int *mc)
+{
+ u32 reg;
+
+ bcmgenet_umac_writel(priv,
+ addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv,
+ addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
+ reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+ reg |= (1 << (MAX_MC_COUNT - *mc));
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ *i += 2;
+ (*mc)++;
+}
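+
+/* Example of the filter layout above: for the first slot (*i == 0, *mc == 0)
+ * the broadcast address ff:ff:ff:ff:ff:ff is written as 0x0000ffff to
+ * UMAC_MDF_ADDR + 0 and 0xffffffff to UMAC_MDF_ADDR + 4, and bit
+ * (MAX_MC_COUNT - 0) = 16 of UMAC_MDF_CTRL enables that slot.
+ */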
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, mc;
+ u32 reg;
+
+ netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+ /* Promiscuous mode */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ /* UniMac doesn't support ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ netdev_warn(dev, "ALLMULTI is not supported\n");
+ return;
+ }
+
+ /* update MDF filter */
+ i = 0;
+ mc = 0;
+ /* Broadcast */
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ /* my own address. */
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+ /* Unicast list */
+ if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+ return;
+
+ if (!netdev_uc_empty(dev))
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ /* Multicast */
+ if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+ return;
+
+ netdev_for_each_mc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ /* Setting the MAC address at the hardware level is not possible
+ * without disabling the UniMAC RX/TX enable bits.
+ */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+ .ndo_open = bcmgenet_open,
+ .ndo_stop = bcmgenet_close,
+ .ndo_start_xmit = bcmgenet_xmit,
+ .ndo_tx_timeout = bcmgenet_timeout,
+ .ndo_set_rx_mode = bcmgenet_set_rx_mode,
+ .ndo_set_mac_address = bcmgenet_set_mac_addr,
+ .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_set_features = bcmgenet_set_features,
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+ [GENET_V1] = {
+ .tx_queues = 0,
+ .rx_queues = 0,
+ .bds_cnt = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+ },
+ [GENET_V2] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT,
+ },
+ [GENET_V3] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+ [GENET_V4] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+};
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+ struct bcmgenet_hw_params *params;
+ u32 reg;
+ u8 major;
+
+ if (GENET_IS_V4(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V4;
+ } else if (GENET_IS_V3(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V3;
+ } else if (GENET_IS_V2(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V2;
+ } else if (GENET_IS_V1(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V1;
+ }
+
+ /* enum genet_version starts at 1 */
+ priv->hw_params = &bcmgenet_hw_params[priv->version];
+ params = priv->hw_params;
+
+ /* Read GENET HW version */
+ reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
+ major = (reg >> 24 & 0x0f);
+ if (major == 5)
+ major = 4;
+ else if (major == 0)
+ major = 1;
+ if (major != priv->version) {
+ dev_err(&priv->pdev->dev,
+ "GENET version mismatch, got: %d, configured for: %d\n",
+ major, priv->version);
+ }
+
+ /* Print the GENET core version */
+ dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+ major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!(params->flags & GENET_HAS_40BITS))
+ pr_warn("GENET does not support 40-bits PA\n");
+#endif
+
+ pr_debug("Configuration for version: %d\n"
+ "TXq: %1d, RXq: %1d, BDs: %1d\n"
+ "BP << en: %2d, BP msk: 0x%05x\n"
+ "HFB count: %2d, QTAQ msk: 0x%05x\n"
+ "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+ "RDMA: 0x%05x, TDMA: 0x%05x\n"
+ "Words/BD: %d\n",
+ priv->version,
+ params->tx_queues, params->rx_queues, params->bds_cnt,
+ params->bp_in_en_shift, params->bp_in_mask,
+ params->hfb_filter_cnt, params->qtag_mask,
+ params->tbuf_offset, params->hfb_offset,
+ params->hfb_reg_offset,
+ params->rdma_offset, params->tdma_offset,
+ params->words_per_bd);
+}
+
+static const struct of_device_id bcmgenet_match[] = {
+ { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+ { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+ { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+ { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { },
+};
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct bcmgenet_priv *priv;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ int err = -EIO;
+
+ /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
+ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+ if (!dev) {
+ dev_err(&pdev->dev, "can't allocate net device\n");
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->irq0 || !priv->irq1) {
+ dev_err(&pdev->dev, "can't find IRQs\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ err = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ ether_addr_copy(dev->dev_addr, macaddr);
+ dev->watchdog_timeo = 2 * HZ;
+ SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->netdev_ops = &bcmgenet_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
+
+ priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+ /* Set hardware features */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+ /* Set the needed headroom to account for any possible
+ * features enabling/disabling at runtime
+ */
+ dev->needed_headroom += 64;
+
+ netdev_boot_setup_check(dev);
+
+ priv->dev = dev;
+ priv->pdev = pdev;
+ priv->version = (enum bcmgenet_version)of_id->data;
+
+ bcmgenet_set_hw_params(priv);
+
+ /* Mii wait queue */
+ init_waitqueue_head(&priv->wq);
+ /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+ priv->rx_buf_len = RX_BUF_LENGTH;
+ INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+ priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ if (IS_ERR(priv->clk))
+ dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+ priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ if (IS_ERR(priv->clk_wol))
+ dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ err = reset_umac(priv);
+ if (err)
+ goto err_clk_disable;
+
+ err = bcmgenet_mii_init(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
+ * just the ring 16 descriptor-based TX queue)
+ */
+ netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+ netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Turn off the main clock, WOL clock is handled separately */
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return err;
+
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+err:
+ free_netdev(dev);
+ return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+ struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ unregister_netdev(priv->dev);
+ bcmgenet_mii_exit(priv->dev);
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+
+static struct platform_driver bcmgenet_driver = {
+ .probe = bcmgenet_probe,
+ .remove = bcmgenet_remove,
+ .driver = {
+ .name = "bcmgenet",
+ .owner = THIS_MODULE,
+ .of_match_table = bcmgenet_match,
+ },
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 000000000000..0f117105fed1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+ */
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC 256
+
+/* which ring is descriptor based */
+#define DESC_INDEX 16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * with ENET_PAD(8) the maximum frame size rounds up to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH 0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB 0xFF
+#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO 5
+
+/* 64B receive/transmit status block */
+struct status_64 {
+ u32 length_status; /* length and peripheral status */
+ u32 ext_status; /* Extended status*/
+ u32 rx_csum; /* partial rx checksum */
+ u32 unused1[9]; /* unused */
+ u32 tx_csum_info; /* Tx checksum info. */
+ u32 unused2[3]; /* unused */
+};
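+/* 16 x 32-bit words = 64 bytes, matching the 64B status block size */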
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK 0x1FFFFF
+#define STATUS_RX_CSUM_MASK 0xFFFF
+#define STATUS_RX_CSUM_OK 0x10000
+#define STATUS_RX_CSUM_FR 0x20000
+#define STATUS_RX_PROTO_TCP 0
+#define STATUS_RX_PROTO_UDP 1
+#define STATUS_RX_PROTO_ICMP 2
+#define STATUS_RX_PROTO_OTHER 3
+#define STATUS_RX_PROTO_MASK 3
+#define STATUS_RX_PROTO_SHIFT 18
+#define STATUS_FILTER_INDEX_MASK 0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK 0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT 16
+#define STATUS_TX_CSUM_PROTO_UDP 0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
+#define STATUS_TX_CSUM_LV 0x80000000
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+ struct bcmgenet_rx_counters rx;
+ struct bcmgenet_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+ u32 mdf_err_cnt;
+};
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define UMAC_SPEED_10 0
+#define UMAC_SPEED_100 1
+#define UMAC_SPEED_1000 2
+#define UMAC_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00C
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+#define UMAC_MDIO_CMD 0x614
+#define MDIO_START_BUSY (1 << 29)
+#define MDIO_READ_FAIL (1 << 28)
+#define MDIO_RD (2 << 26)
+#define MDIO_WR (1 << 26)
+#define MDIO_PMD_SHIFT 21
+#define MDIO_PMD_MASK 0x1F
+#define MDIO_REG_SHIFT 16
+#define MDIO_REG_MASK 0x1F
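+/* Example: bcmgenet_mii_read() composes a read command as
+ * MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (location << MDIO_REG_SHIFT)
+ * and then sets MDIO_START_BUSY to start the transaction.
+ */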
+
+#define UMAC_RBUF_OVFL_CNT 0x61C
+
+#define UMAC_MPD_CTRL 0x620
+#define MPD_EN (1 << 0)
+#define MPD_PW_EN (1 << 27)
+#define MPD_MSEQ_LEN_SHIFT 16
+#define MPD_MSEQ_LEN_MASK 0xFF
+
+#define UMAC_MPD_PW_MS 0x624
+#define UMAC_MPD_PW_LS 0x628
+#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_MDF_ERR_CNT 0x638
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RESET_RX (1 << 0)
+#define MIB_RESET_RUNT (1 << 1)
+#define MIB_RESET_TX (1 << 2)
+
+#define RBUF_CTRL 0x00
+#define RBUF_64B_EN (1 << 0)
+#define RBUF_ALIGN_2B (1 << 1)
+#define RBUF_BAD_DIS (1 << 2)
+
+#define RBUF_STATUS 0x0C
+#define RBUF_STATUS_WOL (1 << 0)
+#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
+#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
+
+#define RBUF_CHK_CTRL 0x14
+#define RBUF_RXCHK_EN (1 << 0)
+#define RBUF_SKIP_FCS (1 << 4)
+
+#define RBUF_TBUF_SIZE_CTRL 0xb4
+
+#define RBUF_HFB_CTRL_V1 0x38
+#define RBUF_HFB_FILTER_EN_SHIFT 16
+#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
+#define RBUF_HFB_EN (1 << 0)
+#define RBUF_HFB_256B (1 << 1)
+#define RBUF_ACPI_EN (1 << 2)
+
+#define RBUF_HFB_LEN_V1 0x3C
+#define RBUF_FLTR_LEN_MASK 0xFF
+#define RBUF_FLTR_LEN_SHIFT 8
+
+#define TBUF_CTRL 0x00
+#define TBUF_BP_MC 0x0C
+
+#define TBUF_CTRL_V1 0x80
+#define TBUF_BP_MC_V1 0xA0
+
+#define HFB_CTRL 0x00
+#define HFB_FLT_ENABLE_V3PLUS 0x04
+#define HFB_FLT_LEN_V2 0x04
+#define HFB_FLT_LEN_V3PLUS 0x1C
+
+/* uniMac intrl2 registers */
+#define INTRL2_CPU_STAT 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0C
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB (1 << 0)
+#define UMAC_IRQ_EPHY (1 << 1)
+#define UMAC_IRQ_PHY_DET_R (1 << 2)
+#define UMAC_IRQ_PHY_DET_F (1 << 3)
+#define UMAC_IRQ_LINK_UP (1 << 4)
+#define UMAC_IRQ_LINK_DOWN (1 << 5)
+#define UMAC_IRQ_UMAC (1 << 6)
+#define UMAC_IRQ_UMAC_TSV (1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
+#define UMAC_IRQ_HFB_SM (1 << 10)
+#define UMAC_IRQ_HFB_MM (1 << 11)
+#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
+#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE (1 << 23)
+#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+
+/* Register block offsets */
+#define GENET_SYS_OFF 0x0000
+#define GENET_GR_BRIDGE_OFF 0x0040
+#define GENET_EXT_OFF 0x0080
+#define GENET_INTRL2_0_OFF 0x0200
+#define GENET_INTRL2_1_OFF 0x0240
+#define GENET_RBUF_OFF 0x0300
+#define GENET_UMAC_OFF 0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL 0x00
+#define SYS_PORT_CTRL 0x04
+#define PORT_MODE_INT_EPHY 0
+#define PORT_MODE_INT_GPHY 1
+#define PORT_MODE_EXT_EPHY 2
+#define PORT_MODE_EXT_GPHY 3
+#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
+#define PORT_MODE_EXT_RVMII_50 4
+#define LED_ACT_SOURCE_MAC (1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL 0x08
+#define SYS_TBUF_FLUSH_CTRL 0x0C
+#define RBUF_FLUSH_CTRL_V1 0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT 0x00
+#define EXT_PWR_DOWN_BIAS (1 << 0)
+#define EXT_PWR_DOWN_DLL (1 << 1)
+#define EXT_PWR_DOWN_PHY (1 << 2)
+#define EXT_PWR_DN_EN_LD (1 << 3)
+#define EXT_ENERGY_DET (1 << 4)
+#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_PHY_RESET (1 << 8)
+#define EXT_ENERGY_DET_MASK (1 << 12)
+
+#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN (1 << 0)
+#define RGMII_LINK (1 << 4)
+#define OOB_DISABLE (1 << 5)
+#define ID_MODE_DIS (1 << 16)
+
+#define EXT_GPHY_CTRL 0x1C
+#define EXT_CFG_IDDQ_BIAS (1 << 0)
+#define EXT_CFG_PWR_DOWN (1 << 1)
+#define EXT_GPHY_RESET (1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE (0x40)
+#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
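+/* i.e. 0x40 * (16 + 1) = 0x440 bytes of ring register space in total */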
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK 0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
+#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT 16
+#define DMA_P_INDEX_MASK 0xFFFF
+#define DMA_C_INDEX_MASK 0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK 0xFFFF
+#define DMA_RING_SIZE_SHIFT 16
+#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK 0x00FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT 16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT 16
+
+/* DMA control register */
+#define DMA_EN (1 << 0)
+#define DMA_RING_BUF_EN_SHIFT 0x01
+#define DMA_RING_BUF_EN_MASK 0xFFFF
+#define DMA_TSB_SWAP_EN (1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED (1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK 0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK 0x1FFFF
+#define DMA_PFC_ENABLE (1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
+#define DMA_REGISTER_MODE (1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK 0xFFFF
+#define DMA_TIMEOUT_VAL 5000 /* microseconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK 0x03
+#define DMA_RING_BUF_PRIORITY_MASK 0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT 5
+#define DMA_RATE_ADJ_MASK 0xFF
+
+/* Tx/Rx Dma Descriptor common bits*/
+#define DMA_BUFLENGTH_MASK 0x0fff
+#define DMA_BUFLENGTH_SHIFT 16
+#define DMA_OWN 0x8000
+#define DMA_EOP 0x4000
+#define DMA_SOP 0x2000
+#define DMA_WRAP 0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN 0x0200
+#define DMA_TX_APPEND_CRC 0x0040
+#define DMA_TX_OW_CRC 0x0020
+#define DMA_TX_DO_CSUM 0x0010
+#define DMA_TX_QTAG_SHIFT 7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS 0x8000
+#define DMA_RX_CHK_V12 0x1000
+#define DMA_RX_BRDCAST 0x0040
+#define DMA_RX_MULT 0x0020
+#define DMA_RX_LG 0x0010
+#define DMA_RX_NO 0x0008
+#define DMA_RX_RXER 0x0004
+#define DMA_RX_CRC_ERROR 0x0002
+#define DMA_RX_OV 0x0001
+#define DMA_RX_FI_MASK 0x001F
+#define DMA_RX_FI_SHIFT 0x0007
+#define DMA_DESC_ALLOC_MASK 0x00FF
+
+#define DMA_ARBITER_RR 0x00
+#define DMA_ARBITER_WRR 0x01
+#define DMA_ARBITER_SP 0x02
+
+struct enet_cb {
+ struct sk_buff *skb;
+ void __iomem *bd_addr;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+ GENET_POWER_CABLE_SENSE = 0,
+ GENET_POWER_PASSIVE,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET version detection and compile-time
+ * selection of the version, to optimize code paths for a given hardware
+ */
+enum bcmgenet_version {
+ GENET_V1 = 1,
+ GENET_V2,
+ GENET_V3,
+ GENET_V4
+};
+
+#define GENET_IS_V1(p) ((p)->version == GENET_V1)
+#define GENET_IS_V2(p) ((p)->version == GENET_V2)
+#define GENET_IS_V3(p) ((p)->version == GENET_V3)
+#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS (1 << 0)
+#define GENET_HAS_EXT (1 << 1)
+#define GENET_HAS_MDIO_INTR (1 << 2)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+ u8 tx_queues;
+ u8 rx_queues;
+ u8 bds_cnt;
+ u8 bp_in_en_shift;
+ u32 bp_in_mask;
+ u8 hfb_filter_cnt;
+ u8 qtag_mask;
+ u16 tbuf_offset;
+ u32 hfb_offset;
+ u32 hfb_reg_offset;
+ u32 rdma_offset;
+ u32 tdma_offset;
+ u32 words_per_bd;
+ u32 flags;
+};
+
+struct bcmgenet_tx_ring {
+ spinlock_t lock; /* ring lock */
+ unsigned int index; /* ring index */
+ unsigned int queue; /* queue index */
+ struct enet_cb *cbs; /* tx ring buffer control block*/
+ unsigned int size; /* size of each tx ring */
+ unsigned int c_index; /* last consumer index of each ring*/
+ unsigned int free_bds; /* # of free bds for each ring */
+ unsigned int write_ptr; /* Tx ring write pointer SW copy */
+ unsigned int prod_index; /* Tx ring producer index SW copy */
+ unsigned int cb_ptr; /* Tx ring initial CB ptr */
+ unsigned int end_ptr; /* Tx ring end CB ptr */
+ void (*int_enable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+};
+
+/* device context */
+struct bcmgenet_priv {
+ void __iomem *base;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* NAPI for descriptor based rx */
+ struct napi_struct napi ____cacheline_aligned;
+
+ /* transmit variables */
+ void __iomem *tx_bds;
+ struct enet_cb *tx_cbs;
+ unsigned int num_tx_bds;
+
+ struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+ /* receive variables */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ int rx_bd_assign_index;
+ struct enet_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_buf_len;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* other misc variables */
+ struct bcmgenet_hw_params *hw_params;
+
+ /* MDIO bus variables */
+ wait_queue_head_t wq;
+ struct phy_device *phydev;
+ struct device_node *phy_dn;
+ struct mii_bus *mii_bus;
+
+ /* PHY device variables */
+ int old_duplex;
+ int old_link;
+ int old_pause;
+ phy_interface_t phy_interface;
+ int phy_addr;
+ int ext_phy;
+
+ /* Interrupt variables */
+ struct work_struct bcmgenet_irq_work;
+ int irq0;
+ int irq1;
+ unsigned int irq0_stat;
+ unsigned int irq1_stat;
+
+ /* HW descriptors/checksum variables */
+ bool desc_64b_en;
+ bool desc_rxchk_en;
+ bool crc_fwd_en;
+
+ unsigned int dma_rx_chk_bit;
+
+ u32 msg_enable;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ /* WOL */
+ unsigned long wol_enabled;
+ struct clk *clk_wol;
+ u32 wolopts;
+
+ struct bcmgenet_mib_counters mib;
+};
+
+#define GENET_IO_MACRO(name, offset) \
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
+ u32 off) \
+{ \
+ return __raw_readl(priv->base + offset + off); \
+} \
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+}
+
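+/* For example, GENET_IO_MACRO(umac, GENET_UMAC_OFF) below expands to
+ * bcmgenet_umac_readl(priv, off) and bcmgenet_umac_writel(priv, val, off),
+ * which access registers at priv->base + GENET_UMAC_OFF + off.
+ */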
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 000000000000..4608673beaff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+ /* Start MDIO transaction */
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
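+ /* Wait up to 10 ms (HZ / 100) for the MDIO transaction to complete */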
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+ & MDIO_START_BUSY),
+ HZ / 100);
+ ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+ if (ret & MDIO_READ_FAIL)
+ return -EIO;
+
+ return ret & 0xffff;
+}
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+ int location, u16 val)
+{
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT) | (0xffff & val)),
+ UMAC_MDIO_CMD);
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+ MDIO_START_BUSY),
+ HZ / 100);
+
+ return 0;
+}
+
+/* Set up the netdev link state when the PHY link status changes and
+ * update the UMAC and RGMII blocks when the link is up
+ */
+static void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u32 reg, cmd_bits = 0;
+ unsigned int status_changed = 0;
+
+ if (priv->old_link != phydev->link) {
+ status_changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (phydev->link) {
+ /* Program the UMAC and RGMII blocks based on the established link
+ * speed, pause, and duplex.
+ * The speed set in umac->cmd tells the RGMII block which clock
+ * (25MHz for 100Mbps, 125MHz for 1Gbps) to use for transmit.
+ * The receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = UMAC_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = UMAC_SPEED_100;
+ else
+ cmd_bits = UMAC_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (priv->old_duplex != phydev->duplex) {
+ status_changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ status_changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ /* pause capability */
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ if (status_changed)
+ phy_print_status(phydev);
+}
+
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
+ }
+}
+
+static void bcmgenet_ephy_power_up(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg = 0;
+
+ /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+ if (!GENET_IS_V4(priv))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(2);
+
+ reg &= ~EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ udelay(20);
+}
+
+static void bcmgenet_internal_phy_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Power up EPHY */
+ bcmgenet_ephy_power_up(dev);
+ /* enable APD */
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(dev);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct device *kdev = &priv->pdev->dev;
+ const char *phy_name = NULL;
+ u32 id_mode_dis = 0;
+ u32 port_ctrl;
+ u32 reg;
+
+ priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+ if (phy_is_internal(priv->phydev))
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MOCA:
+ /* Irrespective of the actually configured PHY speed (100 or
+ * 1000), GENETv4 only has an internal GPHY, so we will just end
+ * up masking the Gigabit features from what we support, not
+ * switching to the EPHY.
+ */
+ if (GENET_IS_V4(priv))
+ port_ctrl = PORT_MODE_INT_GPHY;
+ else
+ port_ctrl = PORT_MODE_INT_EPHY;
+
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ if (phy_is_internal(priv->phydev)) {
+ phy_name = "internal PHY";
+ bcmgenet_internal_phy_setup(dev);
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ phy_name = "MoCA";
+ bcmgenet_moca_phy_setup(priv);
+ }
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ phy_name = "external MII";
+ phydev->supported &= PHY_BASIC_FEATURES;
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_REVMII:
+ phy_name = "external RvMII";
+ /* of_mdiobus_register took care of reading the 'max-speed'
+ * PHY property for us, effectively limiting the PHY supported
+ * capabilities; use that knowledge to also configure the
+ * Reverse MII interface correctly.
+ */
+ if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ PHY_BASIC_FEATURES)
+ port_ctrl = PORT_MODE_EXT_RVMII_25;
+ else
+ port_ctrl = PORT_MODE_EXT_RVMII_50;
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = BIT(16);
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (id_mode_dis)
+ phy_name = "external RGMII (no delay)";
+ else
+ phy_name = "external RGMII (TX delay)";
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ break;
+ default:
+ dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+ return -EINVAL;
+ }
+
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_probe(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ unsigned int phy_flags;
+ int ret;
+
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ if (priv->phy_dn)
+ phydev = of_phy_connect(dev, priv->phy_dn,
+ bcmgenet_mii_setup, 0,
+ priv->phy_interface);
+ else
+ phydev = of_phy_connect_fixed_link(dev,
+ bcmgenet_mii_setup,
+ priv->phy_interface);
+
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+ priv->phydev = phydev;
+
+ /* Configure the port multiplexer based on the probed PHY device, since
+ * reading the 'max-speed' property determines the maximum supported
+ * PHY speed, which is needed for bcmgenet_mii_config() to configure
+ * things appropriately.
+ */
+ ret = bcmgenet_mii_config(dev);
+ if (ret) {
+ phy_disconnect(priv->phydev);
+ return ret;
+ }
+
+ phy_flags = PHY_BRCM_100MBPS_WAR;
+
+ /* Workarounds are only needed for 100Mbps PHYs, and
+ * never on GENET V1 hardware
+ */
+ if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
+ phy_flags = 0;
+
+ phydev->dev_flags |= phy_flags;
+ phydev->advertising = phydev->supported;
+
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs
+ */
+ if (phy_is_internal(priv->phydev))
+ priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+ else
+ priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+ pr_info("attached PHY at address %d [%s]\n",
+ phydev->addr, phydev->drv->name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+ struct mii_bus *bus;
+
+ if (priv->mii_bus)
+ return 0;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ pr_err("failed to allocate\n");
+ return -ENOMEM;
+ }
+
+ bus = priv->mii_bus;
+ bus->priv = priv->dev;
+ bus->name = "bcmgenet MII bus";
+ bus->parent = &priv->pdev->dev;
+ bus->read = bcmgenet_mii_read;
+ bus->write = bcmgenet_mii_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->pdev->name, priv->pdev->id);
+
+ bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *mdio_dn;
+ char *compat;
+ int ret;
+
+ compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+ if (!compat)
+ return -ENOMEM;
+
+ mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ kfree(compat);
+ if (!mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ /* Fetch the PHY phandle */
+ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+ /* Get the link mode */
+ priv->phy_interface = of_get_phy_mode(dn);
+
+ return 0;
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = bcmgenet_mii_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = bcmgenet_mii_of_init(priv);
+ if (ret)
+ goto out_free;
+
+ ret = bcmgenet_mii_probe(dev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mdiobus_unregister(priv->mii_bus);
+out_free:
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+ return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 70a225c8df5c..b9f7022f4e81 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1401,11 +1401,6 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
return ret;
}
-static int tg3_mdio_reset(struct mii_bus *bp)
-{
- return 0;
-}
-
static void tg3_mdio_config_5785(struct tg3 *tp)
{
u32 val;
@@ -1542,7 +1537,6 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
- tp->mdio_bus->reset = &tg3_mdio_reset;
tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
tp->mdio_bus->irq = &tp->mdio_irq[0];
@@ -6322,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 1,
+ .n_pins = 0,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
@@ -6593,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -6924,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (len > (tp->dev->mtu + ETH_HLEN) &&
skb->protocol != htons(ETH_P_8021Q)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto drop_it_no_recycle;
}
@@ -7807,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
} else {
u32 save_entry = *entry;
@@ -7822,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
new_skb->len, base_flags,
mss, vlan)) {
tg3_tx_skb_unmap(tnapi, save_entry, -1);
- dev_kfree_skb(new_skb);
+ dev_kfree_skb_any(new_skb);
ret = -1;
}
}
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
*pskb = new_skb;
return ret;
}
@@ -7871,7 +7866,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
} while (segs);
tg3_tso_bug_end:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -7923,8 +7918,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_cow_head(skb, 0))
goto drop;
iph = ip_hdr(skb);
@@ -8093,7 +8087,7 @@ dma_error:
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
drop_nofree:
tp->tx_dropped++;
return NETDEV_TX_OK;
@@ -11361,12 +11355,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
msix_ent[i].vector = 0;
}
- rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
if (rc < 0) {
return false;
- } else if (rc != 0) {
- if (pci_enable_msix(tp->pdev, msix_ent, rc))
- return false;
+ } else if (rc < tp->irq_cnt) {
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187e82fb..675550fe8ee9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- BNAD_UPDATE_CTR(bnad, tso_err);
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0) {
+ BNAD_UPDATE_CTR(bnad, tso_err);
+ return err;
}
/*
@@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)
for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
- if (ret > 0) {
- /* Not enough MSI-X vectors. */
+ ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+ 1, bnad->msix_num);
+ if (ret < 0) {
+ goto intx_mode;
+ } else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
@@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
- if (bnad->msix_num > ret)
+ if (bnad->msix_num > ret) {
+ pci_disable_msix(bnad->pcidev);
goto intx_mode;
-
- /* Try once more with adjusted numbers */
- /* If this fails, fall back to INTx */
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- bnad->msix_num);
- if (ret)
- goto intx_mode;
-
- } else if (ret < 0)
- goto intx_mode;
+ }
+ }
pci_intx(bnad->pcidev, 0);
@@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
}
if (unlikely((gso_size + skb_transport_offset(skb) +
tcp_hdrlen(skb)) >= skb->len)) {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
} else {
- txqent->hdr.wi.opcode =
- __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
txqent->hdr.wi.lso_mss = htons(gso_size);
}
@@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
} else {
- txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 proto = 0;
- if (skb->protocol == __constant_htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP))
proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
- else if (skb->protocol ==
- __constant_htons(ETH_P_IPV6)) {
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
/* nexthdr may not be TCP immediately. */
proto = ipv6_hdr(skb)->nexthdr;
}
@@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Sanity checks for the skb */
if (unlikely(skb->len <= ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
return NETDEV_TX_OK;
}
if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
if (unlikely(len == 0)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
@@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
@@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
@@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Program the opcode, flags, frame_len, num_vectors in WI */
if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
return NETDEV_TX_OK;
}
@@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
vect_id = 0;
BNA_QE_INDX_INC(prod, q_depth);
txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
- txqent->hdr.wi_ext.opcode =
- __constant_htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
unmap = &unmap_q[prod];
}
@@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(len != skb->len)) {
/* Undo the changes starting at tcb->producer_index */
bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d0c38e01e99f..ca97005e24b4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -199,11 +199,6 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int macb_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
/**
* macb_set_tx_clk() - Set a clock to a new frequency
* @clk Pointer to the clock to change
@@ -375,7 +370,6 @@ int macb_mii_init(struct macb *bp)
bp->mii_bus->name = "MACB_mii_bus";
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
- bp->mii_bus->reset = &macb_mdio_reset;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -1045,7 +1039,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
goto unlock;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index d2a183c3a6ce..521dfea44b83 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
/* Check tx error on the last segment */
if (desc_get_tx_ls(p)) {
desc_get_tx_status(priv, p);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
desc = first;
dma_unmap_single(priv->device, desc_get_buf_addr(desc),
desc_get_buf_len(desc), DMA_TO_DEVICE);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
- int i, err;
+ int i;
vectors = ARRAY_SIZE(entries);
for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0)
- pci_disable_msix(adap->pdev);
-
- if (!err && vectors < (adap->params.nports + 1)) {
- pci_disable_msix(adap->pdev);
- err = -1;
- }
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
- if (!err) {
- for (i = 0; i < vectors; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- adap->msix_nvectors = vectors;
- }
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
- return err;
+ return 0;
}
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 632b318eb38a..8b069f96e920 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
if (d->eop) {
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
V_WR_TID(q->token));
wr_gen2(d, gen);
- kfree_skb(skb);
+ dev_consume_skb_any(skb);
return;
}
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..32db37709263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
};
enum {
@@ -254,6 +255,7 @@ struct vpd_params {
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
+ u8 pn[PN_LEN + 1];
};
struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
unsigned char bypass;
unsigned int ofldq_wr_cred;
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
#include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ unsigned short db_pidx_inc;
u64 udb;
};
@@ -553,8 +557,13 @@ struct sge {
u32 pktshift; /* padding between CPL & packet data */
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- unsigned int starve_thres;
- u8 idma_state[2];
+
+ /* State variables for detecting an SGE Ingress DMA hang */
+ unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
+ unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
+
unsigned int egr_start;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488767d9..6fe58913403a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5014, 4),
+ CH_DEVICE(0x5015, 4),
+ CH_DEVICE(0x5080, 4),
+ CH_DEVICE(0x5081, 4),
+ CH_DEVICE(0x5082, 4),
+ CH_DEVICE(0x5083, 4),
+ CH_DEVICE(0x5084, 4),
+ CH_DEVICE(0x5085, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
+ CH_DEVICE(0x5414, 4),
+ CH_DEVICE(0x5415, 4),
+ CH_DEVICE(0x5480, 4),
+ CH_DEVICE(0x5481, 4),
+ CH_DEVICE(0x5482, 4),
+ CH_DEVICE(0x5483, 4),
+ CH_DEVICE(0x5484, 4),
+ CH_DEVICE(0x5485, 4),
{ 0, }
};
@@ -423,15 +439,18 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case SPEED_10000:
+ case 10000:
s = "10Gbps";
break;
- case SPEED_1000:
+ case 1000:
s = "1000Mbps";
break;
- case SPEED_100:
+ case 100:
s = "100Mbps";
break;
+ case 40000:
+ s = "40Gbps";
+ break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
- 0x41300, 0x413c4,
+ 0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
@@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
- 0x49300, 0x493c4,
+ 0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
@@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
+ else if (type == FW_PORT_TYPE_BP40_BA)
+ v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
return v;
}
@@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static unsigned int speed_to_caps(int speed)
{
- if (speed == SPEED_100)
+ if (speed == 100)
return FW_PORT_CAP_SPEED_100M;
- if (speed == SPEED_1000)
+ if (speed == 1000)
return FW_PORT_CAP_SPEED_1G;
- if (speed == SPEED_10000)
+ if (speed == 10000)
return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
return 0;
}
@@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || (speed == SPEED_1000) ||
- (speed == SPEED_10000))
+ if (!(lc->supported & cap) ||
+ (speed == 1000) ||
+ (speed == 10000) ||
+ (speed == 40000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_WRITE);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev,
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
- *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
- *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ c.ip_hi = *(__be64 *)(lip->s6_addr);
+ c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}
@@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
static void disable_txq_db(struct sge_txq *q)
{
- spin_lock_irq(&q->db_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->db_lock, flags);
q->db_disabled = 1;
- spin_unlock_irq(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
spin_lock_irq(&q->db_lock);
+ if (q->db_pidx_inc) {
+ /* Make sure that all writes to the TX descriptors
+ * are committed before we tell HW about them.
+ */
+ wmb();
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+ q->db_pidx_inc = 0;
+ }
q->db_disabled = 0;
spin_unlock_irq(&q->db_lock);
}
@@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap)
int i;
for_each_ethrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ethtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ethtxq[i].q);
for_each_ofldrxq(&adap->sge, i)
- enable_txq_db(&adap->sge.ofldtxq[i].q);
+ enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
- enable_txq_db(&adap->sge.ctrlq[i].q);
+ enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+ if (adap->uld_handle[CXGB4_ULD_RDMA])
+ ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+ cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+ struct adapter *adap;
+
+ adap = container_of(work, struct adapter, db_full_task);
+
+ drain_db_fifo(adap, dbfifo_drain_delay);
+ enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT,
+ DBFIFO_HP_INT | DBFIFO_LP_INT);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
u16 hw_pidx, hw_cidx;
int ret;
- spin_lock_bh(&q->db_lock);
+ spin_lock_irq(&q->db_lock);
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
if (ret)
goto out;
@@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
}
out:
q->db_disabled = 0;
- spin_unlock_bh(&q->db_lock);
+ q->db_pidx_inc = 0;
+ spin_unlock_irq(&q->db_lock);
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
@@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
- mutex_lock(&uld_mutex);
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
- mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
- struct adapter *adap;
-
- adap = container_of(work, struct adapter, db_full_task);
-
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- drain_db_fifo(adap, dbfifo_drain_delay);
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT,
- DBFIFO_HP_INT | DBFIFO_LP_INT);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
static void process_db_drop(struct work_struct *work)
{
struct adapter *adap;
@@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
if (is_t4(adap->params.chip)) {
- disable_dbs(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
+ drain_db_fifo(adap, dbfifo_drain_delay);
recover_all_queues(adap);
+ drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
} else {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
- if (is_t4(adap->params.chip))
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->params.chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+ }
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -5370,6 +5415,21 @@ static int adap_init0(struct adapter *adap)
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL parameter, so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
@@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap)
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
- n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
@@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, err, want, need;
+ int i, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap)
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
- while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
- want = err;
+ want = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (want < 0)
+ return want;
- if (!err) {
- /*
- * Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = want - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < want; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(adap->pdev_dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
- return err;
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
#undef EXTRA_VECS
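
The pci_enable_msix_range() conversion above lets the distribution logic run unconditionally on however many vectors (between need and want) were actually granted. The arithmetic is easy to check in isolation; the sketch below reproduces it for hypothetical counts (4 ports, 2 extra vectors, one offload vector per channel) and is not the driver code itself.

#include <stdio.h>

#define EXTRA_VECS 2		/* assumed value, for illustration only */

int main(void)
{
	int nchan = 4;		/* number of ports */
	int ofld_need = nchan;	/* assumed: one offload vector per channel */
	int max_ethqsets = 32;
	int got = 20;		/* vectors granted by pci_enable_msix_range() */
	int i, ofldqsets;

	/* Ethernet queue sets get what's left after the fixed overheads ... */
	i = got - EXTRA_VECS - ofld_need;
	if (i < max_ethqsets)
		max_ethqsets = i;

	/* ... and the offload queue sets take the remainder, rounded down to
	 * a multiple of the channel count so every channel gets an equal share. */
	i = got - EXTRA_VECS - max_ethqsets;
	i -= ofld_need - nchan;
	ofldqsets = (i / nchan) * nchan;

	printf("eth qsets: %d, offload qsets: %d\n", max_ethqsets, ofldqsets);
	return 0;
}
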
@@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap)
static void print_port_info(const struct net_device *dev)
{
- static const char *base[] = {
- "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
- };
-
char buf[80];
char *bufp = buf;
const char *spd = "";
@@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
if (bufp != buf)
--bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+ sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
@@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev)
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ netdev_info(dev, "S/N: %s, P/N: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
/* packet data */
bool enable_fw_ofld_conn; /* Enable connection through fw */
/* WR */
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..ca95cf2954eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang. We'll repeat the warning every N
+ * RX_QCHECK_PERIODs for as long as the SGE Ingress DMA Channel appears to be
+ * hung, until the situation clears. If it clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (unmap)
unmap_sgl(dev, d->skb, d->sgl, q);
- kfree_skb(d->skb);
+ dev_consume_skb_any(d->skb);
d->skb = NULL;
}
++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
+ * immediate data. Return value corresponds to headroom required.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+ int hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
}
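
is_eth_imm() now reports the CPL headroom an immediate-data send would need (or 0 if the packet cannot be inlined), which lets calc_tx_flits() reuse the same number instead of re-deriving it. A standalone sketch of the idea, with made-up sizes standing in for the real CPL structures:

#include <stdio.h>

#define MAX_IMM_TX_PKT_LEN	256	/* assumed limit, for illustration */
#define CPL_TX_PKT_LEN		16	/* stand-in for sizeof(struct cpl_tx_pkt) */
#define CPL_TX_PKT_LSO_LEN	24	/* stand-in for sizeof(struct cpl_tx_pkt_lso_core) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Return the header room needed to inline the packet, or 0 if it won't fit. */
static int eth_imm_headroom(unsigned int pkt_len, int is_gso)
{
	int hdrlen = (is_gso ? CPL_TX_PKT_LSO_LEN : 0) + CPL_TX_PKT_LEN;

	return (pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen) ? hdrlen : 0;
}

int main(void)
{
	unsigned int len = 100;
	int hdrlen = eth_imm_headroom(len, 0);

	if (hdrlen)	/* immediate: flits (8-byte words) cover payload plus CPL header */
		printf("flits = %u\n", DIV_ROUND_UP(len + hdrlen, 8));
	else
		printf("too big to inline\n");
	return 0;
}
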
/**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
+ int hdrlen = is_eth_imm(skb);
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
unsigned int *wr, index;
+ unsigned long flags;
wmb(); /* write descriptors before telling HW */
- spin_lock(&q->db_lock);
+ spin_lock_irqsave(&q->db_lock, flags);
if (!q->db_disabled) {
if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
writel(n, adap->bar2 + q->udb + 8);
wmb();
}
- }
+ } else
+ q->db_pidx_inc += n;
q->db_pidx = q->pidx;
- spin_unlock(&q->db_lock);
+ spin_unlock_irqrestore(&q->db_lock, flags);
}
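
ring_tx_db() now takes the queue lock with interrupts disabled and, when doorbells are blocked, remembers how many descriptors are pending in db_pidx_inc instead of dropping the update. The sketch below shows the general defer-and-flush idea in isolation; the names and the flush routine are illustrative, not the driver's.

#include <stdio.h>

struct txq {
	int db_disabled;		/* doorbells temporarily blocked */
	unsigned int db_pidx_inc;	/* descriptors accumulated while blocked */
	unsigned int hw_pidx;		/* what the hardware has been told so far */
};

static void ring_db(struct txq *q, unsigned int n)
{
	if (q->db_disabled) {
		q->db_pidx_inc += n;	/* defer: just remember the increment */
		return;
	}
	q->hw_pidx += n;		/* stand-in for the doorbell register write */
}

/* Called once doorbells are allowed again: ring a single catch-up doorbell. */
static void flush_db(struct txq *q)
{
	q->db_disabled = 0;
	if (q->db_pidx_inc) {
		q->hw_pidx += q->db_pidx_inc;
		q->db_pidx_inc = 0;
	}
}

int main(void)
{
	struct txq q = { .db_disabled = 1 };

	ring_db(&q, 4);		/* deferred */
	ring_db(&q, 2);		/* deferred */
	flush_db(&q);		/* one doorbell covering both updates */
	printf("hw_pidx = %u, pending = %u\n", q.hw_pidx, q.db_pidx_inc);
	return 0;
}
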
/**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
/*
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb(skb);
+out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
}
- if (!is_eth_imm(skb) &&
+ if (is_eth_imm(skb))
+ immediate = true;
+
+ if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
+ len = immediate ? skb->len : 0;
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
+ FW_WR_IMMDLEN(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+ len += sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
- if (is_eth_imm(skb)) {
+ if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
int last_desc;
@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
- if (unlikely(is_ctrl_pkt(skb)))
+ if (unlikely(is_ctrl_pkt(skb))) {
+ /* Single ctrl queue is a requirement for LE workaround path */
+ if (adap->tids.nsftids)
+ idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ }
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, cnt[2];
+ unsigned int i, idma_same_state_cnt[2];
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
}
t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
- cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
- cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
-
- for (i = 0; i < 2; i++)
- if (cnt[i] >= s->starve_thres) {
- if (s->idma_state[i] || cnt[i] == 0xffffffff)
- continue;
- s->idma_state[i] = 1;
- t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
- m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
- dev_warn(adap->pdev_dev,
- "SGE idma%u starvation detected for "
- "queue %lu\n", i, m & 0xffff);
- } else if (s->idma_state[i])
- s->idma_state[i] = 0;
+ idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+ idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+ if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+ CH_WARN(adap, "SGE idma%d, queue %u, resumed after %d sec\n",
+ i, s->idma_qid[i],
+ s->idma_stalled[i]/HZ);
+ s->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (s->idma_stalled[i] == 0)
+ s->idma_stalled[i] = HZ;
+ else
+ s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+ if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+ if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+ continue;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ */
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+ debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+ debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+ s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ CH_WARN(adap, "SGE idma%u, queue %u, maybe stuck state %u for %d secs (debug0=%#x, debug11=%#x)\n",
+ i, s->idma_qid[i], s->idma_state[i],
+ s->idma_stalled[i]/HZ, debug0, debug11);
+ t4_sge_decode_idma_state(adap, s->idma_state[i]);
+ }
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
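
The rewritten rx-timer callback synthesizes a per-channel stall timer in the HZ domain: it is seeded with HZ the first time the same-state counter crosses the 1-second threshold, then advanced by RX_QCHECK_PERIOD on every callback, and a warning fires only when (stalled - HZ) is a multiple of SGE_IDMA_WARN_REPEAT. The small simulation below walks that cadence with HZ assumed to be 250; it models only the bookkeeping, not the hardware reads.

#include <stdio.h>

#define HZ			250		/* assumed tick rate for the example */
#define RX_QCHECK_PERIOD	(HZ / 2)
#define SGE_IDMA_WARN_THRESH	(1 * HZ)
#define SGE_IDMA_WARN_REPEAT	(20 * RX_QCHECK_PERIOD)

int main(void)
{
	unsigned int stalled = 0;
	int tick;

	/* Pretend the same-state counter stays above the 1s threshold forever. */
	for (tick = 0; tick < 50; tick++) {
		if (stalled == 0)
			stalled = HZ;			/* first crossing: seed with 1s */
		else
			stalled += RX_QCHECK_PERIOD;	/* subsequent callbacks */

		if (stalled < SGE_IDMA_WARN_THRESH)
			continue;
		if (((stalled - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
			continue;

		printf("warning after ~%u seconds of apparent stall\n", stalled / HZ);
	}
	return 0;
}
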
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
#undef READ_FL_BUF
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
- (fl_large_pg & (fl_large_pg-1)) != 0))) {
+ (fl_large_pg & (fl_large_pg-1)) != 0) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
return -EINVAL;
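
The tightened check accepts only two Free List layouts: the small buffer must match PAGE_SIZE exactly, and the large buffer is either unused (forced to 0 when it isn't actually bigger) or a power of two. The classic (x & (x - 1)) == 0 test used here also accepts 0, which is exactly why zeroing fl_large_pg first keeps the single test sufficient. A quick standalone check of that logic:

#include <stdio.h>

#define PAGE_SIZE 4096		/* assumed page size for the example */

static int fl_bufs_ok(unsigned int small_pg, unsigned int large_pg)
{
	/* Only use the large-page buffer when it really is larger. */
	if (large_pg <= small_pg)
		large_pg = 0;

	/* Small buffer must equal the page size; large buffer must be 0 or a
	 * power of two ((x & (x - 1)) == 0 is true for both). */
	if (small_pg != PAGE_SIZE || (large_pg & (large_pg - 1)) != 0)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", fl_bufs_ok(4096, 65536));	/* 1: 64K large buffer */
	printf("%d\n", fl_bufs_ok(4096, 4096));		/* 1: large buffer ignored */
	printf("%d\n", fl_bufs_ok(4096, 24576));	/* 0: 24K is not a power of two */
	return 0;
}
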
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control;
- int ret;
+ u32 sge_control, sge_conm_ctrl;
+ int ret, egress_threshold;
/*
* Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
* SGE's Egress Congestion Threshold. If it isn't, then we can get
* stuck waiting for new packets while the SGE is waiting for us to
* give it more Free List entries. (Note that the SGE's Egress
- * Congestion Threshold is in units of 2 Free List pointers.)
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+ sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip))
+ egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+ else
+ egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+ s->fl_starve_thres = 2*egress_threshold + 1;
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_state[0] = s->idma_state[1] = 0;
+ s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
+ s->idma_stalled[0] = 0;
+ s->idma_stalled[1] = 0;
spin_lock_init(&s->intrq_lock);
return 0;
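
On T5 the relevant threshold moves to the EGRTHRESHOLDPACKING field (a 6-bit value at bit 14, per the t4_regs.h additions later in this patch), and because the threshold is expressed in units of 2 Free List pointers the starvation limit becomes 2*threshold + 1. A small extraction example, using an arbitrary sample register value:

#include <stdio.h>

/* Field layout as added to t4_regs.h in this patch. */
#define EGRTHRESHOLDPACKING_MASK	0x3fU
#define EGRTHRESHOLDPACKING_SHIFT	14
#define EGRTHRESHOLDPACKING_GET(x) \
	(((x) >> EGRTHRESHOLDPACKING_SHIFT) & EGRTHRESHOLDPACKING_MASK)

int main(void)
{
	unsigned int sge_conm_ctrl = 0x00020000;	/* arbitrary sample value */
	unsigned int thresh = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);

	/* Threshold is in units of 2 FL pointers, so keep one extra entry. */
	unsigned int fl_starve_thres = 2 * thresh + 1;

	printf("egress threshold = %u, fl_starve_thres = %u\n",
	       thresh, fl_starve_thres);
	return 0;
}
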
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int i, ret, addr;
- int ec, sn;
+ int ec, sn, pn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ strim(p->pn);
/*
* Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
}
/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char *const port_type_description[] = {
+ "R XFI",
+ "R XAUI",
+ "T SGMII",
+ "T XFI",
+ "T XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "R SFP+",
+ "KR/KX",
+ "KR/KX/KX4",
+ "R QSFP_10G",
+ "",
+ "R QSFP",
+ "R BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
}
/**
+ * t4_sge_decode_idma_state - decode the idma state
+ * @adap: the adapter
+ * @state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+ static const char * const t4_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "Not used",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL_PREP",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ "IDMA_FL_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATAFL_DONE",
+ "IDMA_FL_REQ_HEADERFL_DONE",
+ };
+ static const char * const t5_decode[] = {
+ "IDMA_IDLE",
+ "IDMA_ALMOST_IDLE",
+ "IDMA_PUSH_MORE_CPL_FIFO",
+ "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+ "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PCIEHDR",
+ "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+ "IDMA_PHYSADDR_SEND_PAYLOAD",
+ "IDMA_SEND_FIFO_TO_IMSG",
+ "IDMA_FL_REQ_DATA_FL",
+ "IDMA_FL_DROP",
+ "IDMA_FL_DROP_SEND_INC",
+ "IDMA_FL_H_REQ_HEADER_FL",
+ "IDMA_FL_H_SEND_PCIEHDR",
+ "IDMA_FL_H_PUSH_CPL_FIFO",
+ "IDMA_FL_H_SEND_CPL",
+ "IDMA_FL_H_SEND_IP_HDR_FIRST",
+ "IDMA_FL_H_SEND_IP_HDR",
+ "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+ "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_H_SEND_IP_HDR_PADDING",
+ "IDMA_FL_D_SEND_PCIEHDR",
+ "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+ "IDMA_FL_D_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_PCIEHDR",
+ "IDMA_FL_PUSH_CPL_FIFO",
+ "IDMA_FL_SEND_CPL",
+ "IDMA_FL_SEND_PAYLOAD_FIRST",
+ "IDMA_FL_SEND_PAYLOAD",
+ "IDMA_FL_REQ_NEXT_DATA_FL",
+ "IDMA_FL_SEND_NEXT_PCIEHDR",
+ "IDMA_FL_SEND_PADDING",
+ "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+ };
+ static const u32 sge_regs[] = {
+ SGE_DEBUG_DATA_LOW_INDEX_2,
+ SGE_DEBUG_DATA_LOW_INDEX_3,
+ SGE_DEBUG_DATA_HIGH_INDEX_10,
+ };
+ const char **sge_idma_decode;
+ int sge_idma_decode_nstates;
+ int i;
+
+ if (is_t4(adapter->params.chip)) {
+ sge_idma_decode = (const char **)t4_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+ } else {
+ sge_idma_decode = (const char **)t5_decode;
+ sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+ }
+
+ if (state < sge_idma_decode_nstates)
+ CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+ else
+ CH_WARN(adapter, "idma state %d unknown\n", state);
+
+ for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+ CH_WARN(adapter, "SGE register %#x value %#x\n",
+ sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (stat & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = SPEED_100;
+ speed = 100;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = SPEED_1000;
+ speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = SPEED_10000;
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index cd6874b571ee..f2738c710789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -116,6 +116,7 @@ enum CPL_error {
CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
CPL_ERR_RTX_NEG_ADVICE = 35,
CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_KEEPALV_NEG_ADVICE = 37,
CPL_ERR_ABORT_FAILED = 42,
CPL_ERR_IWARP_FLM = 50,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+#define EGRTHRESHOLDPACKING_MASK 0x3fU
+#define EGRTHRESHOLDPACKING_SHIFT 14
+#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
+#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
+ EGRTHRESHOLDPACKING_MASK)
+
#define SGE_DBFIFO_STATUS 0x10a4
#define HP_INT_THRESH_SHIFT 28
#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
#define S_HP_INT_THRESH 28
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
+ FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..52859288de7b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
*/
static int enable_msix(struct adapter *adapter)
{
- int i, err, want, need;
+ int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
*/
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
- while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
- want = err;
- if (err == 0) {
- int nqsets = want - MSIX_EXTRAS;
- if (nqsets < s->max_ethqsets) {
- dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
- " for %d Queue Sets\n", nqsets);
- s->max_ethqsets = nqsets;
- if (nqsets < s->ethqsets)
- reduce_ethqs(adapter, nqsets);
- }
- for (i = 0; i < want; ++i)
- adapter->msix_info[i].vec = entries[i].vector;
- } else if (err > 0) {
- pci_disable_msix(adapter->pdev);
- dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
- " not using MSI-X\n", err);
+ want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+ if (want < 0)
+ return want;
+
+ nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
}
- return err;
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
@@ -2947,6 +2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
CH_DEVICE(0x5811, 0), /* T520-lp-cr */
CH_DEVICE(0x5812, 0), /* T560-cr */
CH_DEVICE(0x5813, 0), /* T580-cr */
+ CH_DEVICE(0x5814, 0), /* T580-so-cr */
+ CH_DEVICE(0x5815, 0), /* T502-bt */
+ CH_DEVICE(0x5880, 0),
+ CH_DEVICE(0x5881, 0),
+ CH_DEVICE(0x5882, 0),
+ CH_DEVICE(0x5883, 0),
+ CH_DEVICE(0x5884, 0),
+ CH_DEVICE(0x5885, 0),
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0a89963c48ce..9cfa4b4bb089 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
if (sdesc->skb) {
if (need_unmap)
unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
- kfree_skb(sdesc->skb);
+ dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* need it any longer.
*/
inline_tx_skb(skb, &txq->q, cpl + 1);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
} else {
/*
* Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
* An error of some sort happened. Free the TX skb and tell the
* OS that we've "dealt" with the packet ...
*/
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 19f642a45f40..fe84fbabc0d4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
spin_unlock_irqrestore(&lp->lock, flags);
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
/* We DO NOT call netif_wake_queue() here.
* We also DO NOT call netif_start_queue().
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfce72ef..2945718ce806 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (skb_shinfo(skb)->gso_size == 0 &&
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int intr = enic_legacy_io_intr();
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
- unsigned int work_done, rq_work_done, wq_work_done;
+ unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
/* Service RQ (first) and WQ
*/
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ if (budget > 0)
+ rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+ rq_work_to_do, enic_rq_service, NULL);
wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
wq_work_to_do, enic_wq_service, NULL);
@@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
unsigned int cq = enic_cq_rq(enic, rq);
unsigned int intr = enic_msix_rq_intr(enic, rq);
unsigned int work_to_do = budget;
- unsigned int work_done;
+ unsigned int work_done = 0;
int err;
/* Service RQ
*/
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ if (budget > 0)
+ work_done = vnic_cq_service(&enic->cq[cq],
+ work_to_do, enic_rq_service, NULL);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->cq_count >= n + m &&
enic->intr_count >= n + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ n + m + 2, n + m + 2) > 0) {
enic->rq_count = n;
enic->wq_count = m;
@@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->wq_count >= m &&
enic->cq_count >= 1 + m &&
enic->intr_count >= 1 + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ 1 + m + 2, 1 + m + 2) > 0) {
enic->rq_count = 1;
enic->wq_count = m;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a1a2b4028a5c..8c4b93be333b 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 5ad9e3e3c0b8..53f0c618045c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
dw32(DCR7, db->cr7_data);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa4ee385091f..aa801a6af7b9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
netdev_err(dev, "big packet = %d\n", (u16)skb->len);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
uw32(DCR7, db->cr7_data);
/* free this SKB */
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 113cd799a131..d9e5ca0d48c1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop_frame:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 8a79a32a5674..e9b0faba3078 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -170,11 +170,6 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int dnet_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void dnet_handle_link_change(struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
@@ -322,7 +317,6 @@ static int dnet_mii_init(struct dnet *bp)
bp->mii_bus->name = "dnet_mii_bus";
bp->mii_bus->read = &dnet_mdio_read;
bp->mii_bus->write = &dnet_mdio_write;
- bp->mii_bus->reset = &dnet_mdio_reset;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 231129dd1764..ea94a8eb6b35 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -4,3 +4,11 @@ config BE2NET
---help---
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
+
+config BE2NET_VXLAN
+ bool "VXLAN offload support on be2net driver"
+ default y
+ depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
+ ---help---
+ Say Y here if you want to enable VXLAN offload support on
+ the be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 05529e273050..8ccaa2520dc3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.0.600.0u"
+#define DRV_VER "10.2u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -262,9 +261,10 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
+ /* set to page-addr for last frag of the page & frag-addr otherwise */
DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
- bool last_page_user;
+ bool last_frag; /* last frag of the page */
};
struct be_rx_stats {
@@ -293,9 +293,10 @@ struct be_rx_compl_info {
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
- u8 vtm;
+ u8 qnq;
u8 pkt_type;
u8 ip_frag;
+ u8 tunneled;
};
struct be_rx_obj {
@@ -359,6 +360,7 @@ struct be_vf_cfg {
int pmac_id;
u16 vlan_tag;
u32 tx_rate;
+ u32 plink_tracking;
};
enum vf_state {
@@ -370,10 +372,11 @@ enum vf_state {
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_FLAGS_VLAN_PROMISC (1 << 4)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
-#define BE_UC_PMAC_COUNT 30
-#define BE_VF_UC_PMAC_COUNT 2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
+#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
+#define BE_UC_PMAC_COUNT 30
+#define BE_VF_UC_PMAC_COUNT 2
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
@@ -467,6 +470,7 @@ struct be_adapter {
u32 port_num;
bool promiscuous;
+ u8 mc_type;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
@@ -492,6 +496,7 @@ struct be_adapter {
u32 sli_family;
u8 hba_port_num;
u16 pvid;
+ __be16 vxlan_port;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
@@ -536,6 +541,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
return min_t(u16, num, num_online_cpus());
}
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
+ adapter->mc_type == vNIC1 || \
+ adapter->mc_type == UFP)
+
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6370c3..d1ec15af0d24 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
- /* Ignore physical link event */
- if (lancer_chip(adapter) &&
+ /* On BEx the FW does not send a separate link status
+ * notification for physical and logical link.
+ * On other chips just process the logical link
+ * status notification
+ */
+ if (!BEx_chip(adapter) &&
!(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
return;
@@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter,
* it may not be received in some cases.
*/
if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
- be_link_status_update(adapter, evt->port_link_status);
+ be_link_status_update(adapter,
+ evt->port_link_status & LINK_STATUS_MASK);
}
/* Grp5 CoS Priority evt */
@@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
- if (evt->enabled)
+ if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
- else
+ dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
+ } else {
adapter->pvid = 0;
+ }
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
@@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
return NULL;
}
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+ return (struct be_port_res_desc *)hdr;
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
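
be_get_port_desc() walks the response buffer as a list of variable-length descriptors, advancing by each header's desc_len (with a fallback length when it is zero). This TLV-style walk is easy to get wrong, so here is a compact standalone version over a fabricated buffer; the descriptor lengths are invented, while the type values 0x51/0x55 mirror the NIC and port descriptor types defined in be_cmds.h.

#include <stdio.h>
#include <stdint.h>

struct desc_hdr {
	uint8_t type;
	uint8_t len;	/* total descriptor length, header included */
};

/* Find the first descriptor of the wanted type in a packed descriptor list. */
static const struct desc_hdr *find_desc(const uint8_t *buf, int count, uint8_t want)
{
	const struct desc_hdr *hdr = (const struct desc_hdr *)buf;
	int i;

	for (i = 0; i < count; i++) {
		if (hdr->type == want)
			return hdr;
		/* Guard against a zero length so the walk always advances. */
		uint8_t len = hdr->len ? hdr->len : sizeof(*hdr);
		hdr = (const struct desc_hdr *)((const uint8_t *)hdr + len);
	}
	return NULL;
}

int main(void)
{
	/* Two fabricated descriptors: type 0x51 (len 8), then type 0x55 (len 4). */
	uint8_t buf[12] = { 0x51, 8, 0, 0, 0, 0, 0, 0, 0x55, 4, 0, 0 };
	const struct desc_hdr *port = find_desc(buf, 2, 0x55);

	printf("port descriptor %sfound\n", port ? "" : "not ");
	return 0;
}
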
static void be_copy_nic_desc(struct be_resources *res,
struct be_nic_res_desc *desc)
{
@@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
{
struct be_cmd_resp_get_profile_config *resp;
struct be_pcie_res_desc *pcie;
+ struct be_port_res_desc *port;
struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
@@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
+ port = be_get_port_desc(resp->func_param, desc_count);
+ if (port)
+ adapter->mc_type = port->mc_type;
+
nic = be_get_nic_desc(resp->func_param, desc_count);
if (nic)
be_copy_nic_desc(res, nic);
@@ -3476,14 +3503,11 @@ err:
return status;
}
-/* Currently only Lancer uses this command and it supports version 0 only
- * Uses sync mcc
- */
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
- u8 domain)
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ int size, u8 version, u8 domain)
{
- struct be_mcc_wrb *wrb;
struct be_cmd_req_set_profile_config *req;
+ struct be_mcc_wrb *wrb;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -3495,44 +3519,116 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
}
req = embedded_payload(wrb);
-
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
wrb, NULL);
+ req->hdr.version = version;
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
- req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
- req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
- req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
- req->nic_desc.pf_num = adapter->pf_number;
- req->nic_desc.vf_num = domain;
-
- /* Mark fields invalid */
- req->nic_desc.unicast_mac_count = 0xFFFF;
- req->nic_desc.mcc_count = 0xFFFF;
- req->nic_desc.vlan_count = 0xFFFF;
- req->nic_desc.mcast_mac_count = 0xFFFF;
- req->nic_desc.txq_count = 0xFFFF;
- req->nic_desc.rq_count = 0xFFFF;
- req->nic_desc.rssq_count = 0xFFFF;
- req->nic_desc.lro_count = 0xFFFF;
- req->nic_desc.cq_count = 0xFFFF;
- req->nic_desc.toe_conn_count = 0xFFFF;
- req->nic_desc.eq_count = 0xFFFF;
- req->nic_desc.link_param = 0xFF;
- req->nic_desc.bw_min = 0xFFFFFFFF;
- req->nic_desc.acpi_params = 0xFF;
- req->nic_desc.wol_param = 0x0F;
-
- /* Change BW */
- req->nic_desc.bw_min = cpu_to_le32(bps);
- req->nic_desc.bw_max = cpu_to_le32(bps);
+ memcpy(req->desc, desc, size);
+
status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+/* Mark all fields invalid */
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
+{
+ memset(nic, 0, sizeof(*nic));
+ nic->unicast_mac_count = 0xFFFF;
+ nic->mcc_count = 0xFFFF;
+ nic->vlan_count = 0xFFFF;
+ nic->mcast_mac_count = 0xFFFF;
+ nic->txq_count = 0xFFFF;
+ nic->rq_count = 0xFFFF;
+ nic->rssq_count = 0xFFFF;
+ nic->lro_count = 0xFFFF;
+ nic->cq_count = 0xFFFF;
+ nic->toe_conn_count = 0xFFFF;
+ nic->eq_count = 0xFFFF;
+ nic->link_param = 0xFF;
+ nic->acpi_params = 0xFF;
+ nic->wol_param = 0x0F;
+ nic->bw_min = 0xFFFFFFFF;
+ nic->bw_max = 0xFFFFFFFF;
+}
+
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+{
+ if (lancer_chip(adapter)) {
+ struct be_nic_res_desc nic_desc;
+
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
+ nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
+ (1 << NOSV_SHIFT);
+ nic_desc.pf_num = adapter->pf_number;
+ nic_desc.vf_num = domain;
+ nic_desc.bw_max = cpu_to_le32(bps);
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ RESOURCE_DESC_SIZE_V0,
+ 0, domain);
+ } else {
+ return be_cmd_set_qos(adapter, bps, domain);
+ }
+}
+
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_manage_iface_filters *req;
+ int status;
+
+ if (iface == 0xFFFFFFFF)
+ return -1;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
+ wrb, NULL);
+ req->op = op;
+ req->target_iface_id = cpu_to_le32(iface);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
+{
+ struct be_port_res_desc port_desc;
+
+ memset(&port_desc, 0, sizeof(port_desc));
+ port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
+ port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ port_desc.link_num = adapter->hba_port_num;
+ if (port) {
+ port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
+ (1 << RCVID_SHIFT);
+ port_desc.nv_port = swab16(port);
+ } else {
+ port_desc.nv_flags = NV_TYPE_DISABLED;
+ port_desc.nv_port = 0;
+ }
+
+ return be_cmd_set_profile_config(adapter, &port_desc,
+ RESOURCE_DESC_SIZE_V1, 1, 0);
+}
+
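
be_cmd_set_vxlan_port() encodes everything the firmware needs into nv_flags and nv_port: the tunnel type in the low two bits, the strip/report-VLAN bits above it, and the UDP port byte-swapped into nv_port. A minimal sketch of that encoding, reusing the bit definitions added to be_cmds.h (the printout is only for inspection; the swab mirrors the driver's swab16 on the __be16 port):

#include <stdio.h>
#include <stdint.h>

/* Bit definitions as added to be_cmds.h in this patch. */
#define NV_TYPE_DISABLED	1
#define NV_TYPE_VXLAN		3
#define SOCVID_SHIFT		2	/* strip outer vlan */
#define RCVID_SHIFT		4	/* report vlan */

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint16_t port = 4789;	/* example VXLAN port; the driver gets it in network order */
	uint8_t nv_flags;
	uint16_t nv_port;

	if (port) {
		nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | (1 << RCVID_SHIFT);
		nv_port = swab16(port);
	} else {		/* a zero port disables the offload again */
		nv_flags = NV_TYPE_DISABLED;
		nv_port = 0;
	}

	printf("nv_flags = %#x, nv_port = %#x\n", nv_flags, nv_port);
	return 0;
}
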
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num)
{
@@ -3723,6 +3819,45 @@ err:
return status;
}
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_ll_link *req;
+ int status;
+
+ if (BEx_chip(adapter) || lancer_chip(adapter))
+ return 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
+ sizeof(*req), wrb, NULL);
+
+ req->hdr.version = 1;
+ req->hdr.domain = domain;
+
+ if (link_state == IFLA_VF_LINK_STATE_ENABLE)
+ req->link_config |= 1;
+
+ if (link_state == IFLA_VF_LINK_STATE_AUTO)
+ req->link_config |= 1 << PLINK_TRACK_SHIFT;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
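
be_cmd_set_logical_link_config() packs the requested VF link state into a single word: bit 0 forces the logical link up, and the PLINK_TRACK bit (at PLINK_TRACK_SHIFT) makes it follow the physical link. A tiny encoding sketch mirroring the IFLA_VF_LINK_STATE_* semantics (auto/enable/disable); the enum names here are local stand-ins for the netlink uapi constants:

#include <stdio.h>

enum { VF_LINK_STATE_AUTO, VF_LINK_STATE_ENABLE, VF_LINK_STATE_DISABLE };

#define PLINK_TRACK_SHIFT 8	/* as added to be_cmds.h in this patch */

static unsigned int encode_ll_link(int link_state)
{
	unsigned int link_config = 0;

	if (link_state == VF_LINK_STATE_ENABLE)
		link_config |= 1;			/* force logical link up */
	if (link_state == VF_LINK_STATE_AUTO)
		link_config |= 1 << PLINK_TRACK_SHIFT;	/* track the physical link */
	return link_config;
}

int main(void)
{
	printf("auto    -> %#x\n", encode_ll_link(VF_LINK_STATE_AUTO));
	printf("enable  -> %#x\n", encode_ll_link(VF_LINK_STATE_ENABLE));
	printf("disable -> %#x\n", encode_ll_link(VF_LINK_STATE_DISABLE));
	return 0;
}
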
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076dc202..b60e4d53c1c9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
@@ -221,6 +222,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
@@ -1098,14 +1100,6 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter)
-{
- return adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED;
-}
-
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,20 +1822,36 @@ struct be_cmd_req_set_ext_fat_caps {
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
+#define PORT_RESOURCE_DESC_TYPE_V1 0x55
#define MAX_RESOURCE_DESC 264
-/* QOS unit number */
-#define QUN 4
-/* Immediate */
-#define IMM 6
-/* No save */
-#define NOSV 7
+#define IMM_SHIFT 6 /* Immediate */
+#define NOSV_SHIFT 7 /* No save */
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
} __packed;
+struct be_port_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u8 link_num;
+ u8 mc_type;
+ u16 rsvd1;
+
+#define NV_TYPE_MASK 0x3 /* bits 0-1 */
+#define NV_TYPE_DISABLED 1
+#define NV_TYPE_VXLAN 3
+#define SOCVID_SHIFT 2 /* Strip outer vlan */
+#define RCVID_SHIFT 4 /* Report vlan */
+ u8 nv_flags;
+ u8 rsvd2;
+ __le16 nv_port; /* vxlan/gre port */
+ u32 rsvd3[19];
+} __packed;
+
struct be_pcie_res_desc {
struct be_res_desc_hdr hdr;
u8 rsvd0;
@@ -1862,6 +1872,8 @@ struct be_pcie_res_desc {
struct be_nic_res_desc {
struct be_res_desc_hdr hdr;
u8 rsvd1;
+
+#define QUN_SHIFT 4 /* QoS is in absolute units */
u8 flags;
u8 vf_num;
u8 rsvd2;
@@ -1891,6 +1903,23 @@ struct be_nic_res_desc {
u32 rsvd8[7];
} __packed;
+/************ Multi-Channel type ***********/
+enum mc_type {
+ MC_NONE = 0x01,
+ UMC = 0x02,
+ FLEX10 = 0x03,
+ vNIC1 = 0x04,
+ nPAR = 0x05,
+ UFP = 0x06,
+ vNIC2 = 0x07
+};
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->mc_type > MC_NONE;
+}
+
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
};
@@ -1919,7 +1948,7 @@ struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
u32 desc_count;
- struct be_nic_res_desc nic_desc;
+ u8 desc[RESOURCE_DESC_SIZE_V1];
};
struct be_cmd_resp_set_profile_config {
@@ -1971,6 +2000,33 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
+/*************** Set logical link ********************/
+#define PLINK_TRACK_SHIFT 8
+struct be_cmd_req_set_ll_link {
+ struct be_cmd_req_hdr hdr;
+ u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
+};
+
+/************** Manage IFACE Filters *******************/
+#define OP_CONVERT_NORMAL_TO_TUNNEL 0
+#define OP_CONVERT_TUNNEL_TO_NORMAL 1
+
+struct be_cmd_req_manage_iface_filters {
+ struct be_cmd_req_hdr hdr;
+ u8 op;
+ u8 rsvd0;
+ u8 flags;
+ u8 rsvd1;
+ u32 tunnel_iface_id;
+ u32 target_iface_id;
+ u8 mac[6];
+ u16 vlan_tag;
+ u32 tenant_id;
+ u32 filter_id;
+ u32 cap_flags;
+ u32 cap_control_flags;
+} __packed;
+
int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2045,7 +2101,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
@@ -2086,9 +2142,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+ int size, u8 version, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, u8 domain);
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be0070f55f..15ba96cba65d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
struct be_rx_stats *stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
+ start = u64_stats_fetch_begin_irq(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
struct be_tx_stats *stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+ start = u64_stats_fetch_begin_irq(&stats->sync_compl);
data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
+ start = u64_stats_fetch_begin_irq(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
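
The _bh to _irq conversion doesn't change the shape of the reader loop: sample a sequence counter, copy the statistics, and retry if a writer raced in between. A stripped-down, single-threaded illustration of that seqcount-style retry loop (plain variables, no real concurrency or memory barriers):

#include <stdio.h>

struct stats {
	unsigned int seq;	/* even = stable, odd = writer in progress */
	unsigned long long rx_bytes;
	unsigned long long rx_pkts;
};

static unsigned int fetch_begin(const struct stats *s) { return s->seq; }

static int fetch_retry(const struct stats *s, unsigned int start)
{
	/* Retry if a write was in flight or completed since we started. */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct stats s = { .seq = 2, .rx_bytes = 123456, .rx_pkts = 789 };
	unsigned long long bytes, pkts;
	unsigned int start;

	do {
		start = fetch_begin(&s);
		bytes = s.rx_bytes;	/* copy a consistent snapshot */
		pkts = s.rx_pkts;
	} while (fetch_retry(&s, start));

	printf("bytes=%llu pkts=%llu\n", bytes, pkts);
	return 0;
}
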
@@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0) {
+ &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- }
+
if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- }
- if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
- &data[2]) != 0) {
+ &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
+
+ if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0)
+ test->flags |= ETH_TEST_FL_FAILED;
+ test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782185f2..3bd198550edb 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 rsvd1[23]; /* dword 2 */
u8 lro_pkt; /* dword 2 */
@@ -401,13 +401,14 @@ struct amap_eth_rx_compl_v1 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 port[2]; /* dword 2 */
u8 vntagp; /* dword 2 */
u8 header_len[8]; /* dword 2 */
u8 header_split[2]; /* dword 2 */
- u8 rsvd1[13]; /* dword 2 */
+ u8 rsvd1[12]; /* dword 2 */
+ u8 tunneled;
u8 valid; /* dword 2 */
u8 rsshash[32]; /* dword 3 */
} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 36c80612e21a..3e6df47b6973 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
+#include <net/vxlan.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -591,10 +592,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_rx_queues(adapter, rxo, i) {
const struct be_rx_stats *rx_stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+ start = u64_stats_fetch_begin_irq(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
stats->rx_packets += pkts;
stats->rx_bytes += bytes;
stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -605,10 +606,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
for_all_tx_queues(adapter, txo, i) {
const struct be_tx_stats *tx_stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+ start = u64_stats_fetch_begin_irq(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+ } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
stats->tx_packets += pkts;
stats->tx_bytes += bytes;
}
@@ -652,7 +653,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
- if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ if (link_status)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
@@ -718,10 +719,23 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+/* Used only for IP tunnel packets */
+static u16 skb_inner_ip_proto(struct sk_buff *skb)
+{
+ return (inner_ip_hdr(skb)->version == 4) ?
+ inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
+}
+
+static u16 skb_ip_proto(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->version == 4) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
- u16 vlan_tag;
+ u16 vlan_tag, proto;
memset(hdr, 0, sizeof(*hdr));
@@ -734,9 +748,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_tcp_pkt(skb))
+ if (skb->encapsulation) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ proto = skb_inner_ip_proto(skb);
+ } else {
+ proto = skb_ip_proto(skb);
+ }
+ if (proto == IPPROTO_TCP)
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
+ else if (proto == IPPROTO_UDP)
AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
}
@@ -935,9 +955,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
- * tagging in UMC mode
+ * tagging in pvid-tagging mode
*/
- if ((adapter->function_mode & UMC_ENABLED) &&
+ if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
*skip_hw_vlan = true;
@@ -1138,7 +1158,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
- goto ret;
+ return status;
+
+ if (adapter->vlan_tag[vid])
+ return status;
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
@@ -1148,7 +1171,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
adapter->vlans_added--;
adapter->vlan_tag[vid] = 0;
}
-ret:
+
return status;
}
@@ -1288,6 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
+ vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
return 0;
}
@@ -1342,11 +1366,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
return -EINVAL;
}
- if (lancer_chip(adapter))
- status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
- else
- status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
-
+ status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
@@ -1354,6 +1374,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
+static int be_set_vf_link_state(struct net_device *netdev, int vf,
+ int link_state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
+ if (!status)
+ adapter->vf_cfg[vf].plink_tracking = link_state;
+
+ return status;
+}
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
ulong now)
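
The be_set_vf_link_state() handler added above wires the driver into the IFLA_VF_LINK_STATE control path ("ip link set <pf> vf <n> state auto|enable|disable"): it validates the VF index, applies the state via a firmware command, and caches it so be_get_vf_config() can report it back. A generic sketch of such a handler; the demo_* structures and demo_fw_set_vf_link() are hypothetical stand-ins, not kernel or be2net APIs:

#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/errno.h>

/* Hypothetical per-VF bookkeeping */
struct demo_vf_cfg {
	int link_state;
};

struct demo_adapter {
	int num_vfs;
	struct demo_vf_cfg *vf_cfg;
};

/* Stand-in for the device-specific firmware command */
static int demo_fw_set_vf_link(struct demo_adapter *adapter, int vf, int state)
{
	return 0;
}

static int demo_set_vf_link_state(struct net_device *netdev, int vf,
				  int link_state)
{
	struct demo_adapter *adapter = netdev_priv(netdev);
	int status;

	if (vf < 0 || vf >= adapter->num_vfs)
		return -EINVAL;

	/* link_state is IFLA_VF_LINK_STATE_AUTO, _ENABLE or _DISABLE */
	status = demo_fw_set_vf_link(adapter, vf, link_state);
	if (!status)
		adapter->vf_cfg[vf].link_state = link_state;

	return status;
}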
@@ -1386,15 +1424,15 @@ static void be_eqd_update(struct be_adapter *adapter)
rxo = &adapter->rx_obj[eqo->idx];
do {
- start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+ start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
rx_pkts = rxo->stats.rx_pkts;
- } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
+ } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
txo = &adapter->tx_obj[eqo->idx];
do {
- start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+ start = u64_stats_fetch_begin_irq(&txo->stats.sync);
tx_pkts = txo->stats.tx_reqs;
- } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
+ } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
/* Skip, if wrapped around or first calculation */
@@ -1449,9 +1487,10 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
/* L4 checksum is not reliable for non TCP/UDP packets.
- * Also ignore ipcksm for ipv6 pkts */
+ * Also ignore ipcksm for ipv6 pkts
+ */
return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
- (rxcp->ip_csum || rxcp->ipv6);
+ (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
@@ -1464,11 +1503,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user) {
+ if (rx_page_info->last_frag) {
dma_unmap_page(&adapter->pdev->dev,
dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, DMA_FROM_DEVICE);
- rx_page_info->last_page_user = false;
+ rx_page_info->last_frag = false;
+ } else {
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ rx_frag_size, DMA_FROM_DEVICE);
}
queue_tail_inc(rxq);
@@ -1590,6 +1633,8 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->encapsulation = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1646,6 +1691,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
if (adapter->netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+ skb->encapsulation = rxcp->tunneled;
skb_mark_napi_id(skb, napi);
if (rxcp->vlanf)
@@ -1676,12 +1723,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+ rxcp->tunneled =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
@@ -1706,7 +1755,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
@@ -1739,9 +1788,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->l4_csum = 0;
if (rxcp->vlanf) {
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
+ /* In QNQ modes, if qnq bit is not set, then the packet was
+ * tagged only with the transparent outer vlan-tag and must
+ * not be treated as a vlan packet by host
+ */
+ if (be_is_qnq_mode(adapter) && !rxcp->qnq)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
@@ -1800,17 +1851,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
- page_info->page_offset = 0;
+ page_offset = 0;
} else {
get_page(pagep);
- page_info->page_offset = page_offset + rx_frag_size;
+ page_offset += rx_frag_size;
}
- page_offset = page_info->page_offset;
+ page_info->page_offset = page_offset;
page_info->page = pagep;
- dma_unmap_addr_set(page_info, bus, page_dmaaddr);
- frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
@@ -1818,15 +1868,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if ((page_offset + rx_frag_size + rx_frag_size) >
adapter->big_page_size) {
pagep = NULL;
- page_info->last_page_user = true;
+ page_info->last_frag = true;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ } else {
+ dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
}
prev_page_info = page_info;
queue_head_inc(rxq);
page_info = &rxo->page_info_tbl[rxq->head];
}
- if (pagep)
- prev_page_info->last_page_user = true;
+
+ /* Mark the last frag of a page when we break out of the above loop
+ * with no more slots available in the RXQ
+ */
+ if (pagep) {
+ prev_page_info->last_frag = true;
+ dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+ }
if (posted) {
atomic_add(posted, &rxq->used);
@@ -1883,7 +1942,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
queue_tail_inc(txq);
} while (cur_index != last_index);
- kfree_skb(sent_skb);
+ dev_kfree_skb_any(sent_skb);
return num_wrbs;
}
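
The one-liner above is part of the netpoll clean-up mentioned in the pull request: TX completion can now be reached from hard-irq context, so skbs are freed with dev_kfree_skb_any(), which falls back to a deferred free when an immediate one is unsafe. A tiny sketch of the rule of thumb, with a hypothetical completion helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Free a transmitted skb from a completion handler.  Because this may
 * be called with hard IRQs disabled (netpoll), use the _any variant,
 * which defers the free when the context does not allow it.
 */
static void demo_tx_complete(struct sk_buff *sent_skb)
{
	dev_kfree_skb_any(sent_skb);
	/* plain kfree_skb() is only safe in process/softirq context,
	 * which is what the old code assumed
	 */
}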
@@ -2439,6 +2498,9 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ bool error_detected = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev = adapter->netdev;
if (be_hw_error(adapter))
return;
@@ -2450,6 +2512,21 @@ void be_detect_error(struct be_adapter *adapter)
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
+ adapter->hw_error = true;
+ /* Do not log error messages if its a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(dev, "Firmware update in progress\n");
+ } else {
+ error_detected = true;
+ dev_err(dev, "Error detected in the card\n");
+ dev_err(dev, "ERR: sliport status 0x%x\n",
+ sliport_status);
+ dev_err(dev, "ERR: sliport error1 0x%x\n",
+ sliport_err1);
+ dev_err(dev, "ERR: sliport error2 0x%x\n",
+ sliport_err2);
+ }
}
} else {
pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2540,33 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- }
-
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow the h/w to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->hw_error = true;
- /* Do not log error messages if its a FW reset */
- if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
- sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(&adapter->pdev->dev,
- "Firmware update in progress\n");
- return;
- } else {
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
- }
- }
-
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "ERR: sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error2 0x%x\n", sliport_err2);
- }
- if (ue_lo) {
- for (i = 0; ue_lo; ue_lo >>= 1, i++) {
- if (ue_lo & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_low_desc[i]);
- }
- }
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * Allow HW to stop working completely in case of a real UE.
+ * Hence not setting the hw_error for UE detection.
+ */
- if (ue_hi) {
- for (i = 0; ue_hi; ue_hi >>= 1, i++) {
- if (ue_hi & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_hi_desc[i]);
+ if (ue_lo || ue_hi) {
+ error_detected = true;
+ dev_err(dev,
+ "Unrecoverable Error detected in the adapter");
+ dev_err(dev, "Please reboot server to recover");
+ if (skyhawk_chip(adapter))
+ adapter->hw_error = true;
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_low_desc[i]);
+ }
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_hi_desc[i]);
+ }
}
}
-
+ if (error_detected)
+ netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2521,7 +2580,7 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, status, num_vec;
+ int i, num_vec;
struct device *dev = &adapter->pdev->dev;
/* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2596,11 @@ static int be_msix_enable(struct be_adapter *adapter)
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
- if (status == 0) {
- goto done;
- } else if (status >= MIN_MSIX_VECTORS) {
- num_vec = status;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec);
- if (!status)
- goto done;
- }
-
- dev_warn(dev, "MSIx enable failed\n");
+ num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ MIN_MSIX_VECTORS, num_vec);
+ if (num_vec < 0)
+ goto fail;
- /* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
- return status;
- return 0;
-done:
if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
adapter->num_msix_roce_vec = num_vec / 2;
dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2612,14 @@ done:
dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
adapter->num_msix_vec);
return 0;
+
+fail:
+ dev_warn(dev, "MSIx enable failed\n");
+
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (!be_physfn(adapter))
+ return num_vec;
+ return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
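
The MSI-X hunks above are one of the tree-wide pci_enable_msix() -> pci_enable_msix_range() conversions called out in the pull request: instead of retrying with the positive return value of pci_enable_msix(), the driver asks for any count in [min, max] and gets either the granted count or a negative errno. A hedged sketch with made-up vector limits:

#include <linux/pci.h>

#define DEMO_MIN_VECS	2	/* hypothetical lower bound */
#define DEMO_MAX_VECS	16	/* hypothetical upper bound */

static int demo_enable_msix(struct pci_dev *pdev,
			    struct msix_entry *entries)
{
	int i, nvec;

	for (i = 0; i < DEMO_MAX_VECS; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries,
				     DEMO_MIN_VECS, DEMO_MAX_VECS);
	/* nvec is the vector count actually granted
	 * (DEMO_MIN_VECS..DEMO_MAX_VECS) or a negative errno;
	 * no pci_enable_msix() retry loop is needed any more.
	 */
	return nvec;	/* caller sizes its queues to nvec */
}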
@@ -2807,6 +2861,12 @@ static int be_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);
+
+#ifdef CONFIG_BE2NET_VXLAN
+ if (skyhawk_chip(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
return 0;
err:
be_close(adapter->netdev);
@@ -2962,6 +3022,21 @@ static void be_mac_clear(struct be_adapter *adapter)
}
}
+#ifdef CONFIG_BE2NET_VXLAN
+static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+{
+ if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
+ be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_TUNNEL_TO_NORMAL);
+
+ if (adapter->vxlan_port)
+ be_cmd_set_vxlan_port(adapter, 0);
+
+ adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
+ adapter->vxlan_port = 0;
+}
+#endif
+
static int be_clear(struct be_adapter *adapter)
{
be_cancel_worker(adapter);
@@ -2969,6 +3044,9 @@ static int be_clear(struct be_adapter *adapter)
if (sriov_enabled(adapter))
be_vf_clear(adapter);
+#ifdef CONFIG_BE2NET_VXLAN
+ be_disable_vxlan_offloads(adapter);
+#endif
/* delete the primary mac along with the uc-mac list */
be_mac_clear(adapter);
@@ -3093,15 +3171,19 @@ static int be_vf_setup(struct be_adapter *adapter)
* Allow full available bandwidth
*/
if (BE3_chip(adapter) && !old_vfs)
- be_cmd_set_qos(adapter, 1000, vf+1);
+ be_cmd_config_qos(adapter, 1000, vf + 1);
status = be_cmd_link_status_query(adapter, &lnk_speed,
NULL, vf + 1);
if (!status)
vf_cfg->tx_rate = lnk_speed;
- if (!old_vfs)
+ if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO,
+ vf+1);
+ }
}
if (!old_vfs) {
@@ -3119,19 +3201,38 @@ err:
return status;
}
+/* Converting function_mode bits on BE3 to SH mc_type enums */
+
+static u8 be_convert_mc_type(u32 function_mode)
+{
+ if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+ return vNIC1;
+ else if (function_mode & FLEX10_MODE)
+ return FLEX10;
+ else if (function_mode & VNIC_MODE)
+ return vNIC2;
+ else if (function_mode & UMC_ENABLED)
+ return UMC;
+ else
+ return MC_NONE;
+}
+
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
struct be_resources *res)
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
- int max_vfs;
-
- max_vfs = pci_sriov_get_totalvfs(pdev);
-
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
- use_sriov = res->max_vfs;
+ int max_vfs = 0;
+
+ if (be_physfn(adapter) && BE3_chip(adapter)) {
+ be_cmd_get_profile_config(adapter, res, 0);
+ /* Some old versions of BE3 FW don't report max_vfs value */
+ if (res->max_vfs == 0) {
+ max_vfs = pci_sriov_get_totalvfs(pdev);
+ res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ }
+ use_sriov = res->max_vfs && sriov_want(adapter);
}
if (be_physfn(adapter))
@@ -3139,17 +3240,32 @@ static void BEx_get_resources(struct be_adapter *adapter,
else
res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
- if (adapter->function_mode & FLEX10_MODE)
- res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else if (adapter->function_mode & UMC_ENABLED)
- res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
- else
+ adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+ if (be_is_mc(adapter)) {
+ /* Assuming that there are 4 channels per port,
+ * when multi-channel is enabled
+ */
+ if (be_is_qnq_mode(adapter))
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ /* In a non-qnq multichannel mode, the pvid
+ * takes up one vlan entry
+ */
+ res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+ } else {
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ }
+
res->max_mcast_mac = BE_MAX_MC;
- /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
- if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
- !be_physfn(adapter) || (adapter->port_num > 1))
+ /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
+ * 2) Create multiple TX rings on a BE3-R multi-channel interface
+ * *only* if it is RSS-capable.
+ */
+ if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
+ !be_physfn(adapter) || (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
res->max_tx_qs = 1;
else
res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3161,7 +3277,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
res->max_rx_qs = res->max_rss_qs + 1;
if (be_physfn(adapter))
- res->max_evt_qs = (max_vfs > 0) ?
+ res->max_evt_qs = (res->max_vfs > 0) ?
BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
else
res->max_evt_qs = 1;
@@ -3252,9 +3368,8 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- /* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
- GFP_KERNEL);
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
if (!adapter->pmac_id)
return -ENOMEM;
@@ -3428,6 +3543,10 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
+ if (be_physfn(adapter))
+ be_cmd_set_logical_link_config(adapter,
+ IFLA_VF_LINK_STATE_AUTO, 0);
+
if (sriov_want(adapter)) {
if (be_max_vfs(adapter))
be_vf_setup(adapter);
@@ -4052,6 +4171,67 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
+#ifdef CONFIG_BE2NET_VXLAN
+static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ int status;
+
+ if (lancer_chip(adapter) || BEx_chip(adapter))
+ return;
+
+ if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
+ dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
+ be16_to_cpu(port));
+ dev_info(dev,
+ "Only one UDP port supported for VxLAN offloads\n");
+ return;
+ }
+
+ status = be_cmd_manage_iface(adapter, adapter->if_handle,
+ OP_CONVERT_NORMAL_TO_TUNNEL);
+ if (status) {
+ dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+ goto err;
+ }
+
+ status = be_cmd_set_vxlan_port(adapter, port);
+ if (status) {
+ dev_warn(dev, "Failed to add VxLAN port\n");
+ goto err;
+ }
+ adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+ adapter->vxlan_port = port;
+
+ dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+ return;
+err:
+ be_disable_vxlan_offloads(adapter);
+ return;
+}
+
+static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (lancer_chip(adapter) || BEx_chip(adapter))
+ return;
+
+ if (adapter->vxlan_port != port)
+ return;
+
+ be_disable_vxlan_offloads(adapter);
+
+ dev_info(&adapter->pdev->dev,
+ "Disabled VxLAN offloads for UDP port %d\n",
+ be16_to_cpu(port));
+}
+#endif
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -4067,13 +4247,18 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
+ .ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = be_busy_poll
+ .ndo_busy_poll = be_busy_poll,
+#endif
+#ifdef CONFIG_BE2NET_VXLAN
+ .ndo_add_vxlan_port = be_add_vxlan_port,
+ .ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
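
The new CONFIG_BE2NET_VXLAN hooks use this kernel's VxLAN offload notification API: a driver registers .ndo_add_vxlan_port/.ndo_del_vxlan_port to learn which UDP port carries VxLAN, and may call vxlan_get_rx_port() at open time (as be_open() now does) to have the currently bound ports replayed. A stripped-down sketch; struct demo_priv and the demo_* callbacks are hypothetical:

#include <linux/netdevice.h>
#include <net/vxlan.h>

/* Hypothetical driver state, illustration only */
struct demo_priv {
	__be16 vxlan_port;	/* 0 when no port is offloaded */
};

static void demo_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct demo_priv *priv = netdev_priv(netdev);

	if (priv->vxlan_port) {
		netdev_info(netdev, "only one VxLAN UDP port offloaded\n");
		return;
	}
	priv->vxlan_port = port;
	/* program 'port' into the NIC's tunnel parser here */
}

static void demo_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct demo_priv *priv = netdev_priv(netdev);

	if (priv->vxlan_port != port)
		return;
	priv->vxlan_port = 0;
	/* revert the NIC to plain (non-tunnel) parsing here */
}

/* A real driver also fills .ndo_open, .ndo_start_xmit, etc. */
static const struct net_device_ops demo_netdev_ops = {
	.ndo_add_vxlan_port	= demo_add_vxlan_port,
	.ndo_del_vxlan_port	= demo_del_vxlan_port,
};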
@@ -4081,6 +4266,12 @@ static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (skyhawk_chip(adapter)) {
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
@@ -4427,14 +4618,32 @@ static bool be_reset_required(struct be_adapter *adapter)
static char *mc_name(struct be_adapter *adapter)
{
- if (adapter->function_mode & FLEX10_MODE)
- return "FLEX10";
- else if (adapter->function_mode & VNIC_MODE)
- return "vNIC";
- else if (adapter->function_mode & UMC_ENABLED)
- return "UMC";
- else
- return "";
+ char *str = ""; /* default */
+
+ switch (adapter->mc_type) {
+ case UMC:
+ str = "UMC";
+ break;
+ case FLEX10:
+ str = "FLEX10";
+ break;
+ case vNIC1:
+ str = "vNIC-1";
+ break;
+ case nPAR:
+ str = "nPAR";
+ break;
+ case UFP:
+ str = "UFP";
+ break;
+ case vNIC2:
+ str = "vNIC-2";
+ break;
+ default:
+ str = "";
+ }
+
+ return str;
}
static inline char *func_name(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415fe017..a5dae4a62bb3 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129e19af..a3ef8f804b9e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 55e0fa03dc90..8b70ca7e342b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -660,11 +660,6 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return -EBUSY;
}
-static int ethoc_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void ethoc_mdio_poll(struct net_device *dev)
{
}
@@ -1210,7 +1205,6 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio->name, pdev->id);
priv->mdio->read = ethoc_mdio_read;
priv->mdio->write = ethoc_mdio_write;
- priv->mdio->reset = ethoc_mdio_reset;
priv->mdio->priv = priv;
priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
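
This ethoc hunk, and the ftgmac100, fec and fs_enet hunks further down, all drop empty MDIO-bus .reset stubs: the MDIO core only invokes bus->reset when it is non-NULL, so a bus with no reset requirement can simply leave the hook unset. A minimal registration sketch with hypothetical accessors (a real bus would also fill bus->irq and its parent/priv fields as the drivers here do):

#include <linux/phy.h>

/* Hypothetical register accessors, illustration only */
static int demo_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	return 0xffff;	/* pretend nothing is attached */
}

static int demo_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
			   u16 val)
{
	return 0;
}

static struct mii_bus *demo_mdio_register(struct device *parent)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return NULL;

	bus->name = "demo-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "demo-0");
	bus->read = demo_mdio_read;
	bus->write = demo_mdio_write;
	/* no bus->reset: the core skips the hook when it is NULL,
	 * so an empty "return 0" stub adds nothing
	 */
	bus->parent = parent;

	if (mdiobus_register(bus)) {
		mdiobus_free(bus);
		return NULL;
	}
	return bus;
}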
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c11ecbc98149..68069eabc4f8 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -940,11 +940,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
return -EIO;
}
-static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
/******************************************************************************
* struct ethtool_ops functions
*****************************************************************************/
@@ -1262,7 +1257,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->mii_bus->priv = netdev;
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
- priv->mii_bus->reset = ftgmac100_mdiobus_reset;
priv->mii_bus->irq = priv->phy_irq;
for (i = 0; i < PHY_MAX_ADDR; i++)
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13b92ac..71debd1c18c9 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o \
- gianfar_sysfs.o
+ gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 03a351300013..8d69e439f0c5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Protocol checksum off-load for TCP and UDP. */
if (fec_enet_clear_csum(skb, ndev)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1255,11 +1255,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int fec_enet_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1384,7 +1379,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->name = "fec_enet_mii_bus";
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
- fep->mii_bus->reset = fec_enet_mdio_reset;
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
@@ -1904,10 +1898,11 @@ fec_set_mac_address(struct net_device *ndev, void *p)
struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ if (addr) {
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ }
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
@@ -2006,6 +2001,8 @@ static int fec_enet_init(struct net_device *ndev)
/* Get the Ethernet address */
fec_get_mac(ndev);
+ /* make sure MAC we just acquired is programmed into the hw */
+ fec_set_mac_address(ndev, NULL);
/* init the tx & rx ring size */
fep->tx_ring_size = TX_RING_SIZE;
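
Two small points in the fec_main.c hunks above: the TX path switches to dev_kfree_skb_any() for the same netpoll-safety reason as elsewhere in this merge, and fec_set_mac_address() now tolerates a NULL sockaddr so fec_enet_init() can reuse it to push the already-chosen dev_addr into the hardware. A generic sketch of the second pattern; demo_hw_set_mac() is a hypothetical register writer:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical hook that writes dev->dev_addr into the MAC registers */
static void demo_hw_set_mac(struct net_device *ndev)
{
}

/* Usable both as .ndo_set_mac_address (p != NULL) and internally at
 * init time (p == NULL) to (re)program the current dev_addr.
 */
static int demo_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	demo_hw_set_mac(ndev);
	return 0;
}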
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 89ccb5b08708..82386b29914a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev)
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 0;
fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 62f042d4aaa9..dc80db41d6b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
u16 pkt_len, sc;
int curidx;
+ if (budget <= 0)
+ return received;
+
/*
* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
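
The fs_enet_rx_napi() change above guards against a zero NAPI budget: netpoll may call poll handlers with budget == 0 expecting only TX work, so an RX poll routine must not process or report any RX packets in that case. A minimal poll skeleton; demo_clean_rx() is a hypothetical per-ring cleanup routine:

#include <linux/netdevice.h>

/* Hypothetical routine that cleans up to 'limit' RX descriptors */
static int demo_clean_rx(struct napi_struct *napi, int limit)
{
	return 0;
}

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	int received;

	/* netpoll may call us with budget == 0; do no RX work then */
	if (budget <= 0)
		return 0;

	received = demo_clean_rx(napi, budget);

	/* Only stop polling and re-enable interrupts once the ring
	 * is drained within budget.
	 */
	if (received < budget) {
		napi_complete(napi);
		/* unmask device RX interrupts here */
	}

	return received;
}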
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 7e69c983d12a..ebf5d6429a8d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -95,12 +95,6 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
}
-static int fs_enet_fec_mii_reset(struct mii_bus *bus)
-{
- /* nothing here - for now */
- return 0;
-}
-
static struct of_device_id fs_enet_mdio_fec_match[];
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
@@ -128,7 +122,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
new_bus->name = "FEC MII Bus";
new_bus->read = &fs_enet_fec_mii_read;
new_bus->write = &fs_enet_fec_mii_write;
- new_bus->reset = &fs_enet_fec_mii_reset;
ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aadc7e1..9125d9abf099 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
-static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
@@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll(struct napi_struct *napi, int budget);
-static int gfar_poll_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_rx(struct napi_struct *napi, int budget);
+static int gfar_poll_tx(struct napi_struct *napi, int budget);
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
@@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
-void gfar_halt(struct net_device *dev);
-static void gfar_halt_nodisable(struct net_device *dev);
-void gfar_start(struct net_device *dev);
+static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr);
@@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
}
}
-static void gfar_init_mac(struct net_device *ndev)
+static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 rctrl = 0;
- u32 tctrl = 0;
- u32 attrs = 0;
-
- /* write the tx/rx base registers */
- gfar_init_tx_rx_base(priv);
-
- /* Configure the coalescing support */
- gfar_configure_coalescing_all(priv);
+ int frame_size = priv->ndev->mtu + ETH_HLEN;
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
+ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+ priv->uses_rxfcb = 1;
+
+ if (priv->hwts_rx_en)
+ priv->uses_rxfcb = 1;
+
+ if (priv->uses_rxfcb)
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
+ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ priv->rx_buffer_size = frame_size;
+}
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
- gfar_write(&regs->rir0, DEFAULT_RIR0);
+ if (priv->poll_mode == GFAR_SQ_POLLING)
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+ else /* GFAR_MQ_POLLING */
+ gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
}
/* Restore PROMISC mode */
- if (ndev->flags & IFF_PROMISC)
+ if (priv->ndev->flags & IFF_PROMISC)
rctrl |= RCTRL_PROM;
- if (ndev->features & NETIF_F_RXCSUM) {
+ if (priv->ndev->features & NETIF_F_RXCSUM)
rctrl |= RCTRL_CHECKSUMMING;
- priv->uses_rxfcb = 1;
- }
- if (priv->extended_hash) {
- rctrl |= RCTRL_EXTHASH;
-
- gfar_clear_exact_match(ndev);
- rctrl |= RCTRL_EMEN;
- }
+ if (priv->extended_hash)
+ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
if (priv->padding) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PADDING(priv->padding);
}
- /* Insert receive time stamps into padding alignment bytes */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
- rctrl &= ~RCTRL_PAL_MASK;
- rctrl |= RCTRL_PADDING(8);
- priv->padding = 8;
- }
-
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en) {
+ if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- priv->uses_rxfcb = 1;
- }
- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
+}
+
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tctrl = 0;
- if (ndev->features & NETIF_F_IP_CSUM)
+ if (priv->ndev->features & NETIF_F_IP_CSUM)
tctrl |= TCTRL_INIT_CSUM;
if (priv->prio_sched_en)
@@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
}
- gfar_write(&regs->tctrl, tctrl);
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tctrl |= TCTRL_VLINS;
- /* Set the extraction length and index */
- attrs = ATTRELI_EL(priv->rx_stash_size) |
- ATTRELI_EI(priv->rx_stash_index);
+ gfar_write(&regs->tctrl, tctrl);
+}
- gfar_write(&regs->attreli, attrs);
+static void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
- /* Start with defaults, and add stashing or locking
- * depending on the approprate variables
- */
- attrs = ATTR_INIT_SETTINGS;
+ if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
- if (priv->bd_stash_en)
- attrs |= ATTR_BDSTASH;
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
- if (priv->rx_stash_size != 0)
- attrs |= ATTR_BUFSTASH;
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
- gfar_write(&regs->attr, attrs);
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+ }
+}
- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
-void lock_rx_qs(struct gfar_private *priv)
+static void gfar_ints_disable(struct gfar_private *priv)
{
int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_lock(&priv->rx_queue[i]->rxlock);
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
}
void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv)
spin_lock(&priv->tx_queue[i]->txlock);
}
-void unlock_rx_qs(struct gfar_private *priv)
+void unlock_tx_qs(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_unlock(&priv->rx_queue[i]->rxlock);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
}
-void unlock_tx_qs(struct gfar_private *priv)
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
}
-static void free_tx_pointers(struct gfar_private *priv)
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = priv->ndev;
+ }
+ return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
{
int i;
@@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv)
kfree(priv->tx_queue[i]);
}
-static void free_rx_pointers(struct gfar_private *priv)
+static void gfar_free_rx_queues(struct gfar_private *priv)
{
int i;
@@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_grps; i++)
- napi_disable(&priv->gfargrp[i].napi);
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_disable(&priv->gfargrp[i].napi_rx);
+ napi_disable(&priv->gfargrp[i].napi_tx);
+ }
}
static void enable_napi(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_grps; i++)
- napi_enable(&priv->gfargrp[i].napi);
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_enable(&priv->gfargrp[i].napi_rx);
+ napi_enable(&priv->gfargrp[i].napi_tx);
+ }
}
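
gianfar now gives each interrupt group two NAPI contexts, napi_rx and napi_tx, enabled and disabled in pairs above and registered with different poll callbacks and weights later in gfar_probe(), so TX reclaim no longer shares a poll loop with RX. A sketch of that registration; struct demo_grp, the weights and the trivial poll bodies are illustrative only:

#include <linux/netdevice.h>

#define DEMO_RX_WEIGHT	64	/* typical NAPI weight for RX */
#define DEMO_TX_WEIGHT	2	/* TX reclaim needs far less */

/* Hypothetical per-interrupt-group state */
struct demo_grp {
	struct napi_struct napi_rx;
	struct napi_struct napi_tx;
};

static int demo_poll_rx(struct napi_struct *napi, int budget)
{
	/* clean the RX ring here; complete when under budget */
	napi_complete(napi);
	return 0;
}

static int demo_poll_tx(struct napi_struct *napi, int budget)
{
	/* reclaim TX descriptors; TX-only work ignores the budget */
	napi_complete(napi);
	return 0;
}

static void demo_register_napi(struct net_device *dev, struct demo_grp *grp)
{
	netif_napi_add(dev, &grp->napi_rx, demo_poll_rx, DEMO_RX_WEIGHT);
	netif_napi_add(dev, &grp->napi_tx, demo_poll_tx, DEMO_TX_WEIGHT);
}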
static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
- u32 *queue_mask;
int i;
for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np,
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- grp->rx_bit_map = queue_mask ?
- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
- grp->tx_bit_map = queue_mask ?
- *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ u32 *rxq_mask, *txq_mask;
+ rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ } else { /* GFAR_MQ_POLLING */
+ grp->rx_bit_map = rxq_mask ?
+ *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = txq_mask ?
+ *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
} else {
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
}
+
+ /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
+ * right to left, so we need to revert the 8 bits to get the q index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ if (!grp->rx_queue)
+ grp->rx_queue = priv->rx_queue[i];
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ if (!grp->tx_queue)
+ grp->tx_queue = priv->tx_queue[i];
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
priv->num_grps++;
return 0;
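
The gfar_parse_group() rework above replaces the open-coded reverse_bitmap() helper (removed further down) with bitrev8(): the device-tree queue bitmaps are MSB-first (bit 7 is queue 0), while for_each_set_bit() walks from bit 0 upwards, so each 8-bit map is reversed once before iterating. A small sketch of the idea with a made-up mask:

#include <linux/bitops.h>
#include <linux/bitrev.h>
#include <linux/printk.h>

static void demo_walk_queue_map(void)
{
	/* MSB-first map: queues 0 and 2 enabled (bits 7 and 5 set) */
	unsigned long qmap = bitrev8(0xA0);
	unsigned int q;

	/* After bitrev8(), queue N is plain bit N, so the generic
	 * bit iterator yields 0 and 2 in order.
	 */
	for_each_set_bit(q, &qmap, 8)
		pr_info("queue %u is mapped to this group\n", q);
}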
@@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
const u32 *stash_idx;
unsigned int num_tx_qs, num_rx_qs;
u32 *tx_queues, *rx_queues;
+ unsigned short mode, poll_mode;
if (!np || !of_device_is_available(np))
return -ENODEV;
- /* parse the num of tx and rx queues */
+ if (of_device_is_compatible(np, "fsl,etsec2")) {
+ mode = MQ_MG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ } else {
+ mode = SQ_SG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ }
+
+ /* parse the num of HW tx and rx queues */
tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
- num_tx_qs = tx_queues ? *tx_queues : 1;
+ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+
+ if (mode == SQ_SG_MODE) {
+ num_tx_qs = 1;
+ num_rx_qs = 1;
+ } else { /* MQ_MG_MODE */
+ /* get the actual number of supported groups */
+ unsigned int num_grps = of_get_available_child_count(np);
+
+ if (num_grps == 0 || num_grps > MAXGROUPS) {
+ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
+ num_grps);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ if (poll_mode == GFAR_SQ_POLLING) {
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
+ } else { /* GFAR_MQ_POLLING */
+ num_tx_qs = tx_queues ? *tx_queues : 1;
+ num_rx_qs = rx_queues ? *rx_queues : 1;
+ }
+ }
if (num_tx_qs > MAX_TX_QS) {
pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -EINVAL;
}
- rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
- num_rx_qs = rx_queues ? *rx_queues : 1;
-
if (num_rx_qs > MAX_RX_QS) {
pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
num_rx_qs, MAX_RX_QS);
@@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv = netdev_priv(dev);
priv->ndev = dev;
+ priv->mode = mode;
+ priv->poll_mode = poll_mode;
+
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
priv->num_rx_queues = num_rx_qs;
- priv->num_grps = 0x0;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
@@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->gfargrp[i].regs = NULL;
/* Parse and initialize group specific information */
- if (of_device_is_compatible(np, "fsl,etsec2")) {
- priv->mode = MQ_MG_MODE;
+ if (priv->mode == MQ_MG_MODE) {
for_each_child_of_node(np, child) {
err = gfar_parse_group(child, priv, model);
if (err)
goto err_grp_init;
}
- } else {
- priv->mode = SQ_SG_MODE;
+ } else { /* SQ_SG_MODE */
err = gfar_parse_group(np, priv, model);
if (err)
goto err_grp_init;
}
- for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i] = NULL;
-
- for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
- GFP_KERNEL);
- if (!priv->tx_queue[i]) {
- err = -ENOMEM;
- goto tx_alloc_failed;
- }
- priv->tx_queue[i]->tx_skbuff = NULL;
- priv->tx_queue[i]->qindex = i;
- priv->tx_queue[i]->dev = dev;
- spin_lock_init(&(priv->tx_queue[i]->txlock));
- }
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
- GFP_KERNEL);
- if (!priv->rx_queue[i]) {
- err = -ENOMEM;
- goto rx_alloc_failed;
- }
- priv->rx_queue[i]->rx_skbuff = NULL;
- priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = dev;
- spin_lock_init(&(priv->rx_queue[i]->rxlock));
- }
-
-
stash = of_get_property(np, "bd-stash", NULL);
if (stash) {
@@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR;
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return 0;
-rx_alloc_failed:
- free_rx_pointers(priv);
-tx_alloc_failed:
- free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return err;
}
@@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 0;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
if (!priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 1;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
@@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(priv->phydev, rq, cmd);
}
-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
-{
- unsigned int new_bit_map = 0x0;
- int mask = 0x1 << (max_qs - 1), i;
-
- for (i = 0; i < max_qs; i++) {
- if (bit_map & mask)
- new_bit_map = new_bit_map + (1 << i);
- mask = mask >> 0x1;
- }
- return new_bit_map;
-}
-
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
u32 class)
{
@@ -1005,99 +1099,140 @@ static void gfar_detect_errata(struct gfar_private *priv)
priv->errata);
}
-/* Set up the ethernet device structure, private data,
- * and anything else we need before we start
- */
-static int gfar_probe(struct platform_device *ofdev)
+void gfar_mac_reset(struct gfar_private *priv)
{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- struct net_device *dev = NULL;
- struct gfar_private *priv = NULL;
- struct gfar __iomem *regs = NULL;
- int err = 0, i, grp_idx = 0;
- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
- u32 isrg = 0;
- u32 __iomem *baddr;
-
- err = gfar_of_init(ofdev, &dev);
-
- if (err)
- return err;
-
- priv = netdev_priv(dev);
- priv->ndev = dev;
- priv->ofdev = ofdev;
- priv->dev = &ofdev->dev;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- spin_lock_init(&priv->bflock);
- INIT_WORK(&priv->reset_task, gfar_reset_task);
-
- platform_set_drvdata(ofdev, priv);
- regs = priv->gfargrp[0].regs;
-
- gfar_detect_errata(priv);
-
- /* Stop the DMA engine now, in case it was running before
- * (The firmware could have used it, and left it running).
- */
- gfar_halt(dev);
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
- udelay(2);
+ udelay(3);
- tempval = 0;
- if (!priv->pause_aneg_en && priv->tx_pause_en)
- tempval |= MACCFG1_TX_FLOW;
- if (!priv->pause_aneg_en && priv->rx_pause_en)
- tempval |= MACCFG1_RX_FLOW;
/* the soft reset bit is not self-resetting, so we need to
* clear it before resuming normal operation
*/
- gfar_write(&regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, 0);
+
+ udelay(3);
+
+ /* Compute rx_buff_size based on config flags */
+ gfar_rx_buff_size_config(priv);
+
+ /* Initialize the max receive frame/buffer lengths */
+ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- if (gfar_has_errata(priv, GFAR_ERRATA_74))
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length
+ */
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
gfar_write(&regs->maccfg2, tempval);
+ /* Clear mac addr hash registers */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ if (priv->extended_hash)
+ gfar_clear_exact_match(priv->ndev);
+
+ gfar_mac_rx_config(priv);
+
+ gfar_mac_tx_config(priv);
+
+ gfar_set_mac_address(priv->ndev);
+
+ gfar_set_multi(priv->ndev);
+
+ /* clear ievent and imask before configuring coalescing */
+ gfar_ints_disable(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing_all(priv);
+}
+
+static void gfar_hw_init(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 attrs;
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
+ gfar_halt(priv);
+
+ gfar_mac_reset(priv);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
- /* Set the dev->base_addr to the gfar reg region */
- dev->base_addr = (unsigned long) regs;
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
- /* Fill in the dev structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->mtu = 1500;
- dev->netdev_ops = &gfar_netdev_ops;
- dev->ethtool_ops = &gfar_ethtool_ops;
+ gfar_write(&regs->attreli, attrs);
- /* Register for napi ...We are registering NAPI for each grp */
- if (priv->mode == SQ_SG_MODE)
- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
- GFAR_DEV_WEIGHT);
- else
- for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
- GFAR_DEV_WEIGHT);
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
- }
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->extended_hash = 1;
@@ -1133,68 +1268,81 @@ static int gfar_probe(struct platform_device *ofdev)
priv->hash_regs[6] = &regs->gaddr6;
priv->hash_regs[7] = &regs->gaddr7;
}
+}
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
- priv->padding = DEFAULT_PADDING;
- else
- priv->padding = 0;
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
- if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ err = gfar_of_init(ofdev, &dev);
- /* Program the isrg regs only if number of grps > 1 */
- if (priv->num_grps > 1) {
- baddr = &regs->isrg0;
- for (i = 0; i < priv->num_grps; i++) {
- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
- gfar_write(baddr, isrg);
- baddr++;
- isrg = 0x0;
+ if (err)
+ return err;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for napi ...We are registering NAPI for each grp */
+ for (i = 0; i < priv->num_grps; i++) {
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
+ } else {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx, 2);
}
}
- /* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers
- */
- for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map =
- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map =
- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups
- */
- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
- priv->gfargrp[grp_idx].num_rx_queues++;
- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
- }
- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
- priv->gfargrp[grp_idx].num_tx_queues++;
- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
- tqueue = tqueue | (TQUEUE_EN0 >> i);
- }
- priv->gfargrp[grp_idx].rstat = rstat;
- priv->gfargrp[grp_idx].tstat = tstat;
- rstat = tstat =0;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- gfar_write(&regs->rqueue, rqueue);
- gfar_write(&regs->tqueue, tqueue);
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN;
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
@@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->num_tx_queues == 1)
priv->prio_sched_en = 1;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ set_bit(GFAR_DOWN, &priv->state);
+
+ gfar_hw_init(priv);
err = register_netdev(dev);
@@ -1230,6 +1379,9 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Initialize the filer table */
gfar_init_filer_table(priv);
- /* Create all the sysfs files */
- gfar_init_sysfs(dev);
-
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
@@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
register_fail:
unmap_group_regs(priv);
- free_tx_pointers(priv);
- free_rx_pointers(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
if (priv->phy_node)
of_node_put(priv->phy_node);
if (priv->tbi_node)
@@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return 0;
@@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev)
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
- gfar_halt_nodisable(ndev);
+ gfar_halt_nodisable(priv);
/* Disable Tx, and Rx if wake-on-LAN is disabled. */
tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev)
gfar_write(&regs->maccfg1, tempval);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev)
*/
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
- gfar_start(ndev);
+ gfar_start(priv);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev)
return -ENOMEM;
}
- init_registers(ndev);
- gfar_set_mac_address(ndev);
- gfar_init_mac(ndev);
- gfar_start(ndev);
+ gfar_mac_reset(priv);
+
+ gfar_init_tx_rx_base(priv);
+
+ gfar_start(priv);
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev)
BMCR_SPEED1000);
}
-static void init_registers(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Clear IEVENT */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
-
- /* Initialize IMASK */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- }
-
- regs = priv->gfargrp[0].regs;
- /* Init hash registers to zero */
- gfar_write(&regs->igaddr0, 0);
- gfar_write(&regs->igaddr1, 0);
- gfar_write(&regs->igaddr2, 0);
- gfar_write(&regs->igaddr3, 0);
- gfar_write(&regs->igaddr4, 0);
- gfar_write(&regs->igaddr5, 0);
- gfar_write(&regs->igaddr6, 0);
- gfar_write(&regs->igaddr7, 0);
-
- gfar_write(&regs->gaddr0, 0);
- gfar_write(&regs->gaddr1, 0);
- gfar_write(&regs->gaddr2, 0);
- gfar_write(&regs->gaddr3, 0);
- gfar_write(&regs->gaddr4, 0);
- gfar_write(&regs->gaddr5, 0);
- gfar_write(&regs->gaddr6, 0);
- gfar_write(&regs->gaddr7, 0);
-
- /* Zero out the rmon mib registers if it has them */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
- memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
-
- /* Mask off the CAM interrupts */
- gfar_write(&regs->rmon.cam1, 0xffffffff);
- gfar_write(&regs->rmon.cam2, 0xffffffff);
- }
-
- /* Initialize the max receive buffer length */
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
-
- /* Initialize the Minimum Frame Length Register */
- gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
-}
-
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
@@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
}
/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct net_device *dev)
+static void gfar_halt_nodisable(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Mask all interrupts */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- /* Clear all interrupts */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- }
+ gfar_ints_disable(priv);
- regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
}
/* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+void gfar_halt(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- gfar_halt_nodisable(dev);
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
- /* Disable Rx and Tx */
+ mdelay(10);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
}
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
- free_irq(gfar_irq(grp, TX)->irq, grp);
- free_irq(gfar_irq(grp, RX)->irq, grp);
- free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
void stop_gfar(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- phy_stop(priv->phydev);
+ netif_tx_stop_all_queues(dev);
- /* Lock it down */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
+ smp_mb__before_clear_bit();
+ set_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
- gfar_halt(dev);
+ disable_napi(priv);
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ /* disable ints and gracefully shut down Rx/Tx DMA */
+ gfar_halt(priv);
- /* Free the IRQs */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- for (i = 0; i < priv->num_grps; i++)
- free_grp_irqs(&priv->gfargrp[i]);
- } else {
- for (i = 0; i < priv->num_grps; i++)
- free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
- &priv->gfargrp[i]);
- }
+ phy_stop(priv->phydev);
free_skb_resources(priv);
}
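The reworked stop_gfar() above drops the old irq-save/queue-lock dance in favour of a GFAR_DOWN state bit: Tx queues are stopped, the bit is set under memory barriers, NAPI is disabled, and only then is the hardware halted. Hot paths are expected to test the same bit instead of taking locks (see the netif_wake_subqueue() guard added to gfar_clean_tx_ring() further down). A minimal sketch of that consumer-side check follows; example_wake_tx() is a hypothetical helper, not driver code, and it assumes the driver-private state bitmap introduced by this patch.

    /* Illustrative only -- not the driver's exact code path. */
    static void example_wake_tx(struct gfar_private *priv, int qindex)
    {
            /* Do not wake a queue while the interface is being torn down;
             * stop_gfar() sets GFAR_DOWN before halting the hardware.
             */
            if (test_bit(GFAR_DOWN, &priv->state))
                    return;

            netif_wake_subqueue(priv->ndev, qindex);
    }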
@@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv)
priv->tx_queue[0]->tx_bd_dma_base);
}
-void gfar_start(struct net_device *dev)
+void gfar_start(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
- /* Enable Rx and Tx in MACCFG1 */
- tempval = gfar_read(&regs->maccfg1);
- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
- gfar_write(&regs->maccfg1, tempval);
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1922,23 @@ void gfar_start(struct net_device *dev)
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
- /* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-static void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
-{
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 __iomem *baddr;
-
- if (priv->mode == MQ_MG_MODE) {
- int i = 0;
-
- baddr = &regs->txic0;
- for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->tx_queue[i]->txcoalescing))
- gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
- baddr = &regs->rxic0;
- for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->rx_queue[i]->rxcoalescing))
- gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
- } else {
- /* Backward compatible case -- even if we enable
- * multiple queues, there's only single reg to program
- */
- gfar_write(&regs->txic, 0);
- if (likely(priv->tx_queue[0]->txcoalescing))
- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+ gfar_ints_enable(priv);
- gfar_write(&regs->rxic, 0);
- if (unlikely(priv->rx_queue[0]->rxcoalescing))
- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
- }
+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1997,65 @@ err_irq_fail:
}
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+static void gfar_free_irq(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = NULL;
- int err, i, j;
+ int i;
- for (i = 0; i < priv->num_grps; i++) {
- regs= priv->gfargrp[i].regs;
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+ &priv->gfargrp[i]);
}
+}
- regs= priv->gfargrp[0].regs;
- err = gfar_alloc_skb_resources(ndev);
- if (err)
- return err;
-
- gfar_init_mac(ndev);
+static int gfar_request_irq(struct gfar_private *priv)
+{
+ int err, i, j;
for (i = 0; i < priv->num_grps; i++) {
err = register_grp_irqs(&priv->gfargrp[i]);
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ return err;
}
}
- /* Start the controller */
- gfar_start(ndev);
+ return 0;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ int err;
+
+ gfar_mac_reset(priv);
+
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_tx_rx_base(priv);
+
+ smp_mb__before_clear_bit();
+ clear_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
+
+ /* Start Rx/Tx DMA and enable the interrupts */
+ gfar_start(priv);
phy_start(priv->phydev);
- gfar_configure_coalescing_all(priv);
+ enable_napi(priv);
- return 0;
+ netif_tx_wake_all_queues(ndev);
-irq_fail:
- free_skb_resources(priv);
- return err;
+ return 0;
}
/* Called when something needs to use the ethernet device
@@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
int err;
- enable_napi(priv);
-
- /* Initialize a bunch of registers */
- init_registers(dev);
-
- gfar_set_mac_address(dev);
-
err = init_phy(dev);
+ if (err)
+ return err;
- if (err) {
- disable_napi(priv);
+ err = gfar_request_irq(priv);
+ if (err)
return err;
- }
err = startup_gfar(dev);
- if (err) {
- disable_napi(priv);
+ if (err)
return err;
- }
-
- netif_tx_start_all_queues(dev);
device_set_wakeup_enable(&dev->dev, priv->wol_en);
@@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
dev->stats.tx_errors++;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(skb_new, skb->sk);
- consume_skb(skb);
+ dev_consume_skb_any(skb);
skb = skb_new;
}
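The xmit path above switches kfree_skb()/consume_skb() to their *_any() variants; the same conversion appears later in this series for ucc_geth, lib82596, ehea and ibmveth. The *_any() helpers are safe whether the caller runs in process, softirq or hard-irq context, while still distinguishing an intentional drop from a normal completion. A hedged sketch of the idiom; example_tx_complete() is a made-up name used only for illustration.

    /* Illustrative only: free an skb from a context that may be atomic. */
    static void example_tx_complete(struct sk_buff *skb, bool dropped)
    {
            if (dropped)
                    dev_kfree_skb_any(skb);    /* error path: counts as a drop */
            else
                    dev_consume_skb_any(skb);  /* normal, successful consume */
    }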
@@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- disable_napi(priv);
-
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev)
phy_disconnect(priv->phydev);
priv->phydev = NULL;
- netif_tx_stop_all_queues(dev);
+ gfar_free_irq(priv);
return 0;
}
@@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev)
return 0;
}
-/* Check if rx parser should be activated */
-void gfar_check_rx_parser_mode(struct gfar_private *priv)
-{
- struct gfar __iomem *regs;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
-
- tempval = gfar_read(&regs->rctrl);
- /* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER) {
- tempval |= RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- } else {
- tempval &= ~RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 0;
- }
- gfar_write(&regs->rctrl, tempval);
-}
-
-/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- unsigned long flags;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (features & NETIF_F_HW_VLAN_CTAG_TX) {
- /* Enable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval |= TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- } else {
- /* Disable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval &= ~TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- }
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Enable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
- gfar_write(&regs->rctrl, tempval);
- priv->uses_rxfcb = 1;
- } else {
- /* Disable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval &= ~RCTRL_VLEX;
- gfar_write(&regs->rctrl, tempval);
-
- gfar_check_rx_parser_mode(priv);
- }
-
- gfar_change_mtu(dev, dev->mtu);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-}
-
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
- int tempsize, tempval;
struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- frame_size += priv->padding;
-
- tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
-
- /* Only stop and start the controller if it isn't already
- * stopped, and we changed something
- */
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- priv->rx_buffer_size = tempsize;
-
dev->mtu = new_mtu;
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ if (dev->flags & IFF_UP)
+ startup_gfar(dev);
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
- */
- tempval = gfar_read(&regs->maccfg2);
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
- tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
- else
- tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ return 0;
+}
- gfar_write(&regs->maccfg2, tempval);
+void reset_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
- startup_gfar(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- return 0;
+ stop_gfar(ndev);
+ startup_gfar(ndev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
reset_task);
- struct net_device *dev = priv->ndev;
-
- if (dev->flags & IFF_UP) {
- netif_tx_stop_all_queues(dev);
- stop_gfar(dev);
- startup_gfar(dev);
- netif_tx_start_all_queues(dev);
- }
-
- netif_tx_schedule_all(dev);
+ reset_gfar(priv->ndev);
}
static void gfar_timeout(struct net_device *dev)
@@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
}
/* If we freed a buffer, we can restart transmission, if necessary */
- if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
- netif_wake_subqueue(dev, tqi);
+ if (tx_queue->num_txbdfree &&
+ netif_tx_queue_stopped(txq) &&
+ !(test_bit(GFAR_DOWN, &priv->state)))
+ netif_wake_subqueue(priv->ndev, tqi);
/* Update dirty indicators */
tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gfargrp->grplock, flags);
- if (napi_schedule_prep(&gfargrp->napi)) {
- gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
- __napi_schedule(&gfargrp->napi);
- } else {
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived.
- */
- gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
- }
- spin_unlock_irqrestore(&gfargrp->grplock, flags);
-
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
- return IRQ_HANDLED;
-}
-
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct sk_buff *skb)
{
@@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
irqreturn_t gfar_receive(int irq, void *grp_id)
{
- gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_rx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_RX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_rx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_tx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_TX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_tx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+ }
+
return IRQ_HANDLED;
}
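gfar_receive() and gfar_transmit() above implement the split-NAPI interrupt scheme: under the group lock, each handler masks only its own event sources in IMASK and schedules the per-direction NAPI context; the matching poll routine re-arms those sources once it completes (see gfar_poll_rx_sq()/gfar_poll_tx_sq() below). A condensed sketch of the re-enable half, assuming the gfar_read()/gfar_write() helpers and group lock shown in this patch; example_reenable_ints() is an illustrative name.

    /* Illustrative only: re-arm one direction's interrupts on NAPI completion. */
    static void example_reenable_ints(struct gfar_priv_grp *grp, u32 default_bits)
    {
            u32 imask;

            spin_lock_irq(&grp->grplock);
            imask = gfar_read(&grp->regs->imask);
            imask |= default_bits;          /* e.g. IMASK_RX_DEFAULT or IMASK_TX_DEFAULT */
            gfar_write(&grp->regs->imask, imask);
            spin_unlock_irq(&grp->grplock);
    }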
@@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi_rx);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
return howmany;
}
-static int gfar_poll_sq(struct napi_struct *napi, int budget)
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi);
+ container_of(napi, struct gfar_priv_grp, napi_rx);
struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
- struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
int work_done = 0;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
-
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
- gfar_clean_tx_ring(tx_queue);
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
work_done = gfar_clean_rx_ring(rx_queue, budget);
if (work_done < budget) {
+ u32 imask;
napi_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_write(&regs->txic, 0);
- if (likely(tx_queue->txcoalescing))
- gfar_write(&regs->txic, tx_queue->txic);
-
- gfar_write(&regs->rxic, 0);
- if (unlikely(rx_queue->rxcoalescing))
- gfar_write(&regs->rxic, rx_queue->rxic);
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
}
return work_done;
}
-static int gfar_poll(struct napi_struct *napi, int budget)
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+ u32 imask;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+ gfar_clean_tx_ring(tx_queue);
+
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+
+ return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi);
+ container_of(napi, struct gfar_priv_grp, napi_rx);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
int work_done = 0, work_done_per_q = 0;
int i, budget_per_q = 0;
- int has_tx_work = 0;
unsigned long rstat_rxf;
int num_act_queues;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
- gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
@@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
if (num_act_queues)
budget_per_q = budget/num_act_queues;
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
- }
-
for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
/* skip queue if not active */
if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
}
}
- if (!num_act_queues && !has_tx_work) {
-
+ if (!num_act_queues) {
+ u32 imask;
napi_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
- gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
}
return work_done;
}
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ int has_tx_work = 0;
+ int i;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
+ }
+ }
+
+ if (!has_tx_work) {
+ u32 imask;
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return 0;
+}
+
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
@@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
struct phy_device *phydev = priv->phydev;
int new_state = 0;
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return;
if (phydev->link) {
u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0195cc..84632c569f2c 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
| IMASK_PERR)
-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
- & IMASK_DEFAULT)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
/* Fifo management */
#define FIFO_TX_THR_MASK 0x01ff
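The new IMASK_RX_DISABLED/IMASK_TX_DISABLED masks are built so that and-ing one of them into IMASK clears only that direction's default sources while leaving every other bit of IMASK_DEFAULT armed. A small user-space check of that identity is sketched below; the numeric values are placeholders chosen for the demonstration, not the eTSEC register layout.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Placeholder values only -- not the real IMASK_* constants. */
            const uint32_t DFLT = 0x0000000f;   /* stands in for IMASK_DEFAULT    */
            const uint32_t RX   = 0x00000003;   /* stands in for IMASK_RX_DEFAULT */
            const uint32_t TX   = 0x0000000c;   /* stands in for IMASK_TX_DEFAULT */
            const uint32_t RX_DISABLED = ~RX & DFLT;

            uint32_t imask = DFLT;
            imask &= RX_DISABLED;          /* what gfar_receive() does for Rx */

            assert((imask & RX) == 0);     /* Rx sources are now masked   */
            assert((imask & TX) == TX);    /* Tx sources remain enabled   */
            return 0;
    }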
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
/* This default RIR value directly corresponds
* to the 3-bit hash value generated */
-#define DEFAULT_RIR0 0x05397700
+#define DEFAULT_8RXQ_RIR0 0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0 0x04104100
/* RQFCR register bits */
#define RQFCR_GPI 0x80000000
@@ -880,7 +885,6 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +896,8 @@ struct gfar {
#define DEFAULT_MAPPING 0xFF
#endif
-#define ISRG_SHIFT_TX 0x10
-#define ISRG_SHIFT_RX 0x18
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
/* The same driver can operate in two modes */
/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -905,6 +909,22 @@ enum {
MQ_MG_MODE
};
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ * The driver supports a single pair of Rx/Tx queues
+ * per interrupt group (Rx/Tx int line). MQ_MG mode
+ * devices have 2 interrupt groups, so the device will
+ * have a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ * The driver supports all 8 Rx and 8 Tx HW queues,
+ * each mapped by the Device Tree to one of the
+ * 2 interrupt groups. This mode implies significant
+ * processing overhead (CPU and controller level).
+ */
+enum gfar_poll_mode {
+ GFAR_SQ_POLLING = 0,
+ GFAR_MQ_POLLING
+};
+
/*
* Per TX queue stats
*/
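The comment block above introduces the two polling modes; their practical effect shows up in the probe changes earlier in this patch, where GFAR_SQ_POLLING selects the single-queue poll handlers and GFAR_MQ_POLLING falls back to the multi-queue ones. A condensed sketch of that selection, mirroring the netif_napi_add() calls in gfar_probe() (the 3.14-era four-argument form); example_register_napi() is an illustrative wrapper, not driver code.

    /* Illustrative only: one Rx and one Tx NAPI context per interrupt group. */
    static void example_register_napi(struct gfar_private *priv, struct net_device *dev)
    {
            int i;

            for (i = 0; i < priv->num_grps; i++) {
                    if (priv->poll_mode == GFAR_SQ_POLLING) {
                            netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
                                           gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
                            netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
                                           gfar_poll_tx_sq, 2);
                    } else {        /* GFAR_MQ_POLLING */
                            netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
                                           gfar_poll_rx, GFAR_DEV_WEIGHT);
                            netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
                                           gfar_poll_tx, 2);
                    }
            }
    }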
@@ -966,7 +986,6 @@ struct rx_q_stats {
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rxlock: per queue rx spin lock
* @rx_skbuff: skb pointers
* @skb_currx: currently use skb pointer
* @rx_bd_base: First rx buffer descriptor
@@ -979,8 +998,7 @@ struct rx_q_stats {
*/
struct gfar_priv_rx_q {
- spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** rx_skbuff;
+ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
dma_addr_t rx_bd_dma_base;
struct rxbd8 *rx_bd_base;
struct rxbd8 *cur_rx;
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
*/
struct gfar_priv_grp {
- spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct napi_struct napi;
- struct gfar_private *priv;
+ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct gfar __iomem *regs;
- unsigned int rstat;
- unsigned long num_rx_queues;
- unsigned long rx_bit_map;
- /* cacheline 3 */
+ struct gfar_priv_tx_q *tx_queue;
+ struct gfar_priv_rx_q *rx_queue;
unsigned int tstat;
+ unsigned int rstat;
+
+ struct gfar_private *priv;
unsigned long num_tx_queues;
unsigned long tx_bit_map;
+ unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
};
@@ -1041,6 +1062,11 @@ enum gfar_errata {
GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
};
+enum gfar_dev_state {
+ GFAR_DOWN = 1,
+ GFAR_RESETTING
+};
+
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1051,8 +1077,6 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
- unsigned int num_rx_queues;
-
struct device *dev;
struct net_device *ndev;
enum gfar_errata errata;
@@ -1060,6 +1084,7 @@ struct gfar_private {
u16 uses_rxfcb;
u16 padding;
+ u32 device_flags;
/* HW time stamping enabled flag */
int hwts_rx_en;
@@ -1069,10 +1094,12 @@ struct gfar_private {
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
struct gfar_priv_grp gfargrp[MAXGROUPS];
- u32 device_flags;
+ unsigned long state;
- unsigned int mode;
+ unsigned short mode;
+ unsigned short poll_mode;
unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
unsigned int num_grps;
/* Network Statistics */
@@ -1113,6 +1140,9 @@ struct gfar_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ u32 rqueue;
+ u32 tqueue;
+
/* RX per device parameters */
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1127,11 +1157,6 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
- /* global parameters */
- unsigned int fifo_threshold;
- unsigned int fifo_starve;
- unsigned int fifo_starve_off;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
-void gfar_halt(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
u32 regnum, u32 read);
void gfar_configure_coalescing_all(struct gfar_private *priv);
-void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-void gfar_check_rx_parser_mode(struct gfar_private *priv);
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d234419cc1..891dbee6e6c1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
#include "gianfar.h"
-extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
- int rx_work_limit);
-
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i, err = 0;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- /* Set up rx coalescing */
- /* As of now, we will enable/disable coalescing for all
- * queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface
- */
- if ((cvals->rx_coalesce_usecs == 0) ||
- (cvals->rx_max_coalesced_frames == 0)) {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 0;
- } else {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 1;
- }
-
if (NULL == priv->phydev)
return -ENODEV;
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
return -EINVAL;
}
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rxic = mk_ic_value(
cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
priv->tx_queue[i]->txcoalescing = 1;
}
- /* Check the bounds of the values */
- if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
- netdev_info(dev, "Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
- return -EINVAL;
- }
-
- if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
- netdev_info(dev, "Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
- return -EINVAL;
- }
-
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->txic = mk_ic_value(
cvals->tx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
}
- gfar_configure_coalescing_all(priv);
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- return 0;
+ return err;
}
/* Fills in rvals with the current ring parameters. Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
}
/* Change the current ring parameters, stopping the controller if
- * necessary so that we don't mess things up while we're in
- * motion. We wait for the ring to be clean before reallocating
- * the rings.
+ * necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int err = 0, i = 0;
+ int err = 0, i;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
return -EINVAL;
}
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- if (dev->flags & IFF_UP) {
- unsigned long flags;
-
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
-
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
-
- /* Now we take down the rings to rebuild them */
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- }
- /* Change the size */
- for (i = 0; i < priv->num_rx_queues; i++) {
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree =
- priv->tx_queue[i]->tx_ring_size;
- }
/* Rebuild the rings with the new size */
- if (dev->flags & IFF_UP) {
+ if (dev->flags & IFF_UP)
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
- }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
- struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
- if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
- gfar_vlan_mode(dev, features);
-
- if (!(changed & NETIF_F_RXCSUM))
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM)))
return 0;
- if (dev->flags & IFF_UP) {
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- unlock_tx_qs(priv);
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ dev->features = features;
+ if (dev->flags & IFF_UP) {
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
-
- dev->features = features;
-
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
+ } else {
+ gfar_mac_reset(priv);
}
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
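gfar_set_features() above, like gfar_change_mtu(), gfar_scoalesce() and gfar_sringparam(), now serialises reconfiguration on the GFAR_RESETTING bit instead of the removed queue locks: take the bit, tear the interface down if it is up, apply the new settings, bring it back up, release the bit. A minimal sketch of that pattern follows; example_reconfigure() is a hypothetical wrapper built from the helpers shown in this patch, not the driver's own code.

    /* Illustrative only: serialise a config change against resets. */
    static int example_reconfigure(struct net_device *dev,
                                   void (*apply)(struct gfar_private *priv))
    {
            struct gfar_private *priv = netdev_priv(dev);
            int err = 0;

            while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
                    cpu_relax();            /* spin until we own the reset bit */

            if (dev->flags & IFF_UP)
                    stop_gfar(dev);

            apply(priv);                    /* commit the new settings */

            if (dev->flags & IFF_UP)
                    err = startup_gfar(dev);

            clear_bit_unlock(GFAR_RESETTING, &priv->state);
            return err;
    }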
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
if (tab->index > MAX_FILER_IDX - 1)
return -EBUSY;
- /* Avoid inconsistent filer table to be processed */
- lock_rx_qs(priv);
-
/* Fill regular entries */
for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
*/
gfar_write_filer(priv, i, 0x20, 0x0);
- unlock_rx_qs(priv);
-
return 0;
}
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index abc28da27042..bb568006f37d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 1,
.adjfreq = ptp_gianfar_adjfreq,
.adjtime = ptp_gianfar_adjtime,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd1378751..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int new_setting = 0;
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
- return count;
-
-
- /* Find out the new setting */
- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
- new_setting = 1;
- else if (!strncmp("off", buf, count - 1) ||
- !strncmp("0", buf, count - 1))
- new_setting = 0;
- else
- return count;
-
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- /* Set the new stashing value */
- priv->bd_stash_en = new_setting;
-
- temp = gfar_read(&regs->attr);
-
- if (new_setting)
- temp |= ATTR_BDSTASH;
- else
- temp &= ~(ATTR_BDSTASH);
-
- gfar_write(&regs->attr, temp);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (length > priv->rx_buffer_size)
- goto out;
-
- if (length == priv->rx_stash_size)
- goto out;
-
- priv->rx_stash_size = length;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EL_MASK;
- temp |= ATTRELI_EL(length);
- gfar_write(&regs->attreli, temp);
-
- /* Turn stashing on/off as appropriate */
- temp = gfar_read(&regs->attr);
-
- if (length)
- temp |= ATTR_BUFSTASH;
- else
- temp &= ~(ATTR_BUFSTASH);
-
- gfar_write(&regs->attr, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
- gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned short index = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (index > priv->rx_stash_size)
- goto out;
-
- if (index == priv->rx_stash_index)
- goto out;
-
- priv->rx_stash_index = index;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EI_MASK;
- temp |= ATTRELI_EI(index);
- gfar_write(&regs->attreli, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
- gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (length > GFAR_MAX_FIFO_THRESHOLD)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_threshold = length;
-
- temp = gfar_read(&regs->fifo_tx_thr);
- temp &= ~FIFO_TX_THR_MASK;
- temp |= length;
- gfar_write(&regs->fifo_tx_thr, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
- gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve = num;
-
- temp = gfar_read(&regs->fifo_tx_starve);
- temp &= ~FIFO_TX_STARVE_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
- gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE_OFF)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve_off = num;
-
- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
- temp &= ~FIFO_TX_STARVE_OFF_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
- gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- int rc;
-
- /* Initialize the default values */
- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
- /* Create our sysfs files */
- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
- if (rc)
- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 72291a8904a9..c8299c31b21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
dev->stats.tx_packets++;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 17fca323c143..c984998b34a0 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->name));
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
if (++lp->next_tx_cmd == TX_RING_SIZE)
lp->next_tx_cmd = 0;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 7628e0fd8455..538903bf13bc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
skb_arr[index] = skb;
tmp_addr = ehea_map_vaddr(skb->data);
if (tmp_addr == -1) {
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
q_skba->os_skbs = fill_wqes - i;
ret = 0;
break;
@@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
skb = pr->sq_skba.arr[index];
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
pr->sq_skba.arr[index] = NULL;
}
@@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
skb_copy_bits(skb, 0, imm_data, skb->len);
swqe->immediate_data_length = skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 1fc8334fc181..c9127562bd22 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1044,7 +1044,7 @@ retry_bounce:
DMA_TO_DEVICE);
out:
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
map_failed_frags:
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
restart_poll:
- do {
+ while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1121,7 +1121,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- } while (frames_processed < budget);
+ }
ibmveth_replenish_task(adapter);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index bf7a01ef9a57..b56461ce674c 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
* testing, ie sending frames with bad CRC.
*/
if (unlikely(skb->no_fcs))
- cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+ cb->command |= cpu_to_le16(cb_tx_nc);
else
- cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+ cb->command &= ~cpu_to_le16(cb_tx_nc);
/* interrupt every 16 packets regardless of delay */
if ((nic->cbs_avail & ~15) == nic->cbs_avail)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index ff2d806eaef7..a5f6b11d6992 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 90d363b2d280..535a9430976d 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 8fed74e3fa53..e0aa7f1efb08 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 08e24dc3dc0e..2e758f796d60 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index c2dcfcc10857..106de493373c 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351c94a0cf74..d18e89212575 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -35,9 +28,11 @@
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0150f7fc893d..1471c5464a89 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -269,6 +262,7 @@ struct e1000_adapter {
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
+ u32 tx_hwtstamp_timeouts;
/* Rx */
bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
@@ -333,7 +327,6 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct print_hang_task;
- bool idle_check;
int phy_hang_count;
u16 tx_ring_count;
@@ -342,6 +335,7 @@ struct e1000_adapter {
struct hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
+ unsigned long tx_hwtstamp_start;
struct work_struct tx_hwtstamp_work;
 	spinlock_t systim_lock;	/* protects SYSTIML/H registers */
struct cyclecounter cc;
@@ -476,7 +470,7 @@ void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
int e1000e_up(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
void e1000e_reset(struct e1000_adapter *adapter);
void e1000e_power_up_phy(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d14c8f53384c..cad250bc1b99 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for e1000 */
@@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
+ E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -332,7 +326,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* reset the link */
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -380,7 +374,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -726,7 +720,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
pm_runtime_get_sync(netdev->dev.parent);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -924,15 +918,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
- if (i == 7)
+ if (i == 1)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
- if (i == 10)
+ if (i == 4)
mask |= (1 << 30);
+ /* RAR[1-6] owned by management engine - skipping */
+ if (i > 0)
+ i += 6;
}
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
+ /* reset index to actual value */
+ if ((mac->type == e1000_pch2lan) && (i > 6))
+ i -= 6;
}
for (i = 0; i < mac->mta_reg_count; i++)
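The reworked register test above skips RAR[1]-RAR[6] on pch2lan parts because those entries are owned by the Manageability Engine: the loop index jumps past the reserved window before the access and is restored afterwards so the counter stays in step. A hedged, driver-independent sketch of that skip-and-restore idiom (test_rar_pair(), skip_me_window and the window size are illustrative, not e1000e symbols):

#include <linux/types.h>

/* Walk a receive-address array, leaping over a firmware-owned window of
 * entries and undoing the leap so the loop counter remains consistent.
 */
static void test_host_rars(unsigned int rar_count, bool skip_me_window,
                           void (*test_rar_pair)(unsigned int index))
{
        unsigned int i;

        for (i = 0; i < rar_count; i++) {
                /* entries 1..6 belong to the ME: jump over them */
                if (skip_me_window && i > 0)
                        i += 6;

                test_rar_pair(i);

                /* restore the index for the next iteration */
                if (skip_me_window && i > 6)
                        i -= 6;
        }
}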
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b7f38435d1fd..6b3de5f39a97 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -655,12 +648,20 @@ struct e1000_shadow_ram {
#define E1000_ICH8_SHADOW_RAM_WORDS 2048
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
bool eee_disable;
u16 eee_lp_ability;
+ enum e1000_ulp_state ulp_state;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 42f0f6717511..9866f264f55e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
@@ -53,6 +46,14 @@
* 82578DC Gigabit Network Connection
* 82579LM Gigabit Network Connection
* 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
*/
#include "e1000.h"
@@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -239,6 +242,47 @@ out:
}
/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = er32(FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ ew32(FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = er32(CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+ usleep_range(10, 20);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msleep(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ usleep_range(5000, 10000);
+ } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+ msleep(30);
+ }
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -247,6 +291,7 @@ out:
**/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
@@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
e1000_gate_hw_phy_config_ich8lan(hw, true);
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+ e1000_disable_ulp_lpt_lp(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
- e_dbg("Toggling LANPHYPC\n");
-
- /* Set Phy Config Counter to 50msec */
- mac_reg = er32(FEXTNVM3);
- mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
- mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
- ew32(FEXTNVM3, mac_reg);
-
/* Toggle LANPHYPC Value bit */
- mac_reg = er32(CTRL);
- mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
- mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- usleep_range(10, 20);
- mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- if (hw->mac.type < e1000_pch_lpt) {
- msleep(50);
- } else {
- u16 count = 20;
- do {
- usleep_range(5000, 10000);
- } while (!(er32(CTRL_EXT) &
- E1000_CTRL_EXT_LPCD) && count--);
- usleep_range(30000, 60000);
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
if (!ret_val) {
+
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ e_err("Reset blocked by ME\n");
+ goto out;
+ }
+
/* Reset the PHY before any access to it. Doing so, ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000E_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ e_err("ME blocked access to PHY after reset\n");
}
out:
@@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
* Enable/disable EEE based on setting in dev_spec structure, the duplex of
* the link and the EEE capabilities of the link partner. The LPI Control
* register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
**/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
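The expanded kernel-doc above records a timing rule: on 82579 the PHY has no hardware LPI timer, so software must hold off enabling EEE LPI until at least one second after link-up or some switches show link problems. A hedged sketch of that gate in isolation (link_up_jiffies, hw_has_lpi_timer and the enable_lpi callback are illustrative, not e1000e symbols):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Enable LPI immediately on parts with a hardware timer; otherwise only
 * once a full second (HZ jiffies) has passed since link came up.
 */
static void maybe_enable_eee_lpi(unsigned long link_up_jiffies,
                                 bool hw_has_lpi_timer,
                                 void (*enable_lpi)(void))
{
        if (hw_has_lpi_timer || time_after(jiffies, link_up_jiffies + HZ))
                enable_lpi();
}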
@@ -979,6 +1031,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+
+ goto out;
+ }
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (er32(STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msleep(50);
+ }
+ e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (er32(FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (er32(WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ e_dbg("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
+
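The function above takes one of two routes, depending on whether Manageability Engine firmware is present: with an ME it merely sets H2ME.ULP and lets firmware configure the PHY, otherwise it forces SMBus mode and starts the PHY's automatic ULP configuration itself. Condensed into a hedged sketch (locking, error handling and the Sx/WoL details shown above are omitted; hw and phy_reg are as in the function above):

        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
                /* ME present: hand the job to firmware via H2ME */
                ew32(H2ME, er32(H2ME) | E1000_H2ME_ULP |
                           E1000_H2ME_ENFORCE_SETTINGS);
        } else {
                /* no ME: force SMBus mode, then kick off the PHY's
                 * automatic ULP configuration
                 */
                ew32(CTRL_EXT, er32(CTRL_EXT) | E1000_CTRL_EXT_FORCE_SMBUS);
                phy_reg |= I218_ULP_CONFIG1_RESET_TO_SMBUS |
                           I218_ULP_CONFIG1_START;
                e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
        }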
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, the system is transitioned from
+ * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+ */
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ }
+
+ /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ usleep_range(10000, 20000);
+ }
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ ew32(H2ME, mac_reg);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ msleep(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ }
+out:
+ if (ret_val)
+ e_dbg("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
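Taken together, the two new helpers give I217/I218 parts a small ULP state machine tracked in dev_spec.ich8lan.ulp_state: ULP is entered when link is lost or the system heads to Sx, and exited again (force=true on the reset/unload paths, force=false once the ME has finished its own un-configuration) before the PHY is used. A hedged outline of a caller driving those transitions; the wrapper below is illustrative glue, not code from this patch, and it ignores the s32 return values for brevity:

/* Illustrative only: enter ULP when link drops, leave it when link returns. */
static void example_ulp_transition(struct e1000_hw *hw, bool link_up, bool to_sx)
{
        if (link_up)
                e1000_disable_ulp_lpt_lp(hw, false);    /* nominal exit path */
        else
                e1000_enable_ulp_lpt_lp(hw, to_sx);     /* cut power to the PHY */
}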
@@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
e1000e_check_downshift(hw);
/* Enable/Disable EEE after link up */
- ret_val = e1000_set_eee_pchlan(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* RAR[1-6] are owned by manageability. Skip those and program the
* next address into the SHRA register array.
*/
- if (index < (u32)(hw->mac.rar_entry_count - 6)) {
+ if (index < (u32)(hw->mac.rar_entry_count)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1484,11 +1785,13 @@ out:
**/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
- u32 fwsm;
+ bool blocked = false;
+ int i = 0;
- fwsm = er32(FWSM);
-
- return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+ while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+ (i++ < 10))
+ usleep_range(10000, 20000);
+ return blocked ? E1000_BLK_PHY_RESET : 0;
}
/**
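e1000_check_reset_block_ich8lan() now polls FWSM for up to roughly 100-200 ms instead of sampling it once, since the ME may hold the PHY-reset block bit only briefly. The retry shape in isolation, as a hedged sketch (is_blocked() and the budget of ten tries are illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Re-test a possibly transient "blocked" condition a bounded number of
 * times before treating it as real; mirrors the loop added above.
 */
static int wait_while_blocked(bool (*is_blocked)(void))
{
        int i;

        for (i = 0; i < 10; i++) {
                if (!is_blocked())
                        return 0;
                usleep_range(10000, 20000);     /* 10-20 ms per attempt */
        }
        return -EBUSY;
}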
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 217090df33e7..bead50f9187b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
@@ -65,11 +58,16 @@
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
@@ -82,6 +80,9 @@
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
@@ -95,10 +96,12 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
@@ -161,6 +164,16 @@
#define CV_SMB_CTRL PHY_REG(769, 23)
#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
@@ -195,6 +208,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE 0x4000
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
@@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 2480c1091873..baa0a466d1d0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index a61fee404ebe..4e81c2825b7a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e4b0f1ef92f6..cb37ff1f1321 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 326897c29ea8..a8c27f98f7b0 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..dce377b59b2c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
struct sk_buff *skb)
{
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rss);
+ skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
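The direct write to skb->rxhash is replaced by skb_set_hash(), which stores the hash type alongside the value; PKT_HASH_TYPE_L3 tells the stack the hardware hash covers the IP header only, so it is not trusted for L4 flow separation. A hedged sketch of the receive-side call, essentially the shape used above with illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Record a hardware RSS hash on a received skb, tagged with how much of
 * the flow it covers (L3: IP addresses only, no ports).
 */
static void record_rx_hash(struct net_device *netdev, struct sk_buff *skb,
                           __le32 rss)
{
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}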
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = true;
return;
}
- /* Real hang detected */
adapter->tx_hang_recheck = false;
+
+ if (er32(TDH(0)) == er32(TDT(0))) {
+ e_dbg("false hang detected, ignoring\n");
+ return;
+ }
+
+ /* Real hang detected */
netif_stop_queue(netdev);
e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
phy_status, phy_1000t_status, phy_ext_status, pci_status);
+ e1000e_dump(adapter);
+
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
tx_hwtstamp_work);
struct e1000_hw *hw = &adapter->hw;
- if (!adapter->tx_hwtstamp_skb)
- return;
-
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
+ } else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ + adapter->tx_timeout_factor * HZ)) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn("clearing Tx timestamp hang");
} else {
/* reschedule to check later */
schedule_work(&adapter->tx_hwtstamp_work);
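The Tx timestamp worker used to reschedule itself indefinitely when the TSYNCTXCTL valid bit never appeared; it now gives up after tx_timeout_factor seconds measured from the new tx_hwtstamp_start stamp, frees the deferred skb and bumps the tx_hwtstamp_timeouts counter exposed via ethtool above. The timeout arithmetic in isolation, as a hedged sketch:

#include <linux/jiffies.h>
#include <linux/types.h>

/* True once more than 'factor' seconds of jiffies have elapsed since the
 * timestamp request was started.
 */
static bool tx_hwtstamp_expired(unsigned long start, unsigned int factor)
{
        return time_after(jiffies, start + factor * HZ);
}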
@@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, rx_ring->head);
- if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
else
writel(0, rx_ring->tail);
@@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
return;
}
/* MSI-X failed, so fall through and try MSI */
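The pci_enable_msix() to pci_enable_msix_range() conversion also changes the return convention: the old call returned 0 on success, while the range variant returns the number of vectors actually allocated or a negative errno, hence the new 'err > 0' test. A generic sketch of the new calling pattern (not the driver's exact code):

/* Request exactly 'nvec' MSI-X vectors or fail; with min == max the
 * call either allocates all of them or returns a negative errno. */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int i, ret;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
	if (ret < 0)
		return ret;	/* fewer than 'nvec' vectors available */

	return 0;		/* ret == nvec vectors are now enabled */
}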
@@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring->next_to_clean = 0;
writel(0, tx_ring->head);
- if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
else
writel(0, tx_ring->tail);
@@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
u64 tdba;
- u32 tdlen, tarc;
+ u32 tdlen, tctl, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
tdba = tx_ring->dma;
@@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* erratum work around: set txdctl the same for both queues */
ew32(TXDCTL(1), er32(TXDCTL(0)));
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
/* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_TXD_CMD_RS;
+ ew32(TCTL, tctl);
+
hw->mac.ops.config_collision_dist(hw);
}
@@ -2976,11 +2991,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 rctl, rfctl;
u32 pages = 0;
- /* Workaround Si errata on PCHx - configure jumbo frame flow */
- if ((hw->mac.type >= e1000_pch2lan) &&
- (adapter->netdev->mtu > ETH_DATA_LEN) &&
- e1000_lv_jumbo_workaround_ich8lan(hw, true))
- e_dbg("failed to enable jumbo frame workaround mode\n");
+ /* Workaround Si errata on PCHx - configure jumbo frame flow.
+	 * If jumbo frames are not set, program the related MAC/PHY registers
+ * to h/w defaults
+ */
+ if (hw->mac.type >= e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
+ }
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3331,6 +3356,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
+ if (pm_runtime_suspended(netdev->dev.parent))
+ return;
+
/* Check for Promiscuous and All Multicast modes */
rctl = er32(RCTL);
@@ -3691,10 +3719,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
*/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
- /* WoL is enabled */
- if (adapter->wol)
- return;
-
if (adapter->hw.phy.ops.power_down)
adapter->hw.phy.ops.power_down(&adapter->hw);
}
@@ -3911,10 +3935,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
if (!netif_running(adapter->netdev) &&
- !test_bit(__E1000_TESTING, &adapter->state)) {
+ !test_bit(__E1000_TESTING, &adapter->state))
e1000_power_down_phy(adapter);
- return;
- }
e1000_get_phy_info(hw);
@@ -3981,7 +4003,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
static void e1000e_update_stats(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter)
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -4035,12 +4062,8 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (!pci_channel_offline(adapter->pdev))
+ if (reset && !pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
-
- /* TODO: for power management, we could drop the link and
- * pci_disable_device here.
- */
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4048,7 +4071,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
}
@@ -4326,7 +4349,6 @@ static int e1000_open(struct net_device *netdev)
adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
- adapter->idle_check = true;
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -4376,14 +4398,15 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000_free_irq(adapter);
+
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
}
napi_disable(&adapter->napi);
- e1000_power_down_phy(adapter);
-
e1000e_free_tx_resources(adapter->tx_ring);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -4460,11 +4483,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
update_phy_task);
+ struct e1000_hw *hw = &adapter->hw;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- e1000_get_phy_info(&adapter->hw);
+ e1000_get_phy_info(hw);
+
+ /* Enable EEE on 82579 after link up */
+ if (hw->phy.type == e1000_phy_82579)
+ e1000_set_eee_pchlan(hw);
}
/**
@@ -4799,6 +4827,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
if (adapter->phy_hang_count > 1) {
adapter->phy_hang_count = 0;
+ e_dbg("PHY appears hung - resetting\n");
schedule_work(&adapter->reset_task);
}
}
@@ -4957,15 +4986,11 @@ static void e1000_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- /* The link is lost so the controller stops DMA.
- * If there is queued Tx work that cannot be done
- * or if on an 8000ES2LAN which requires a Rx packet
- * buffer work-around on link down event, reset the
- * controller to flush the Tx/Rx packet buffers.
- * (Do the reset outside of interrupt context).
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
*/
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -4988,6 +5013,15 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
+ /* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
@@ -5546,6 +5580,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
schedule_work(&adapter->tx_hwtstamp_work);
} else {
skb_tx_timestamp(skb);
@@ -5684,8 +5719,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
@@ -5711,6 +5749,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -5852,7 +5892,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
struct e1000_hw *hw = &adapter->hw;
- u32 i, mac_reg;
+ u32 i, mac_reg, wuc;
u16 phy_reg, wuc_enable;
int retval;
@@ -5899,13 +5939,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_RFCE;
hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+ wuc = E1000_WUC_PME_EN;
+ if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+ wuc |= E1000_WUC_APME;
+
/* enable PHY wakeup in MAC register */
ew32(WUFC, wufc);
- ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+ ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+ E1000_WUC_PME_STATUS | wuc));
/* configure and enable PHY wakeup in PHY registers */
hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
- hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
/* activate PHY wakeup */
wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5918,15 +5963,10 @@ release:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
- int retval = 0;
netif_device_detach(netdev);
@@ -5937,11 +5977,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ /* Quiesce the device without resetting the hardware */
+ e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
e1000e_reset_interrupt_capability(adapter);
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5972,12 +6030,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
ew32(CTRL_EXT, ctrl_ext);
}
+ if (!runtime)
+ e1000e_power_up_phy(adapter);
+
if (adapter->flags & FLAG_IS_ICH)
e1000_suspend_workarounds_ich8lan(&adapter->hw);
- /* Allow time for pending master requests to run */
- e1000e_disable_pcie_master(&adapter->hw);
-
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5991,10 +6049,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
+
+ e1000_power_down_phy(adapter);
}
- if (adapter->hw.phy.type == e1000_phy_igp_3)
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
+ retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+ if (retval)
+ return retval;
+ }
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6102,18 +6173,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
}
#ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
- return !!adapter->tx_ring->buffer_info;
-}
-
static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u16 aspm_disable_flag = 0;
- u32 err;
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6124,13 +6189,6 @@ static int __e1000_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- err = e1000_request_irq(adapter);
- if (err)
- return err;
- }
-
if (hw->mac.type >= e1000_pch2lan)
e1000_resume_workarounds_pchlan(&adapter->hw);
@@ -6169,11 +6227,6 @@ static int __e1000_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
-
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
@@ -6184,75 +6237,111 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ u32 err = e1000_request_irq(adapter);
+
+ if (err)
+ return err;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ e1000e_pm_freeze(dev);
+
return __e1000_shutdown(pdev, false);
}
-static int e1000_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (e1000e_pm_ready(adapter))
- adapter->idle_check = true;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- return __e1000_resume(pdev);
+ return e1000e_pm_thaw(dev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
- return __e1000_shutdown(pdev, true);
+ return -EBUSY;
}
-static int e1000_idle(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (!e1000e_pm_ready(adapter))
- return 0;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- if (adapter->idle_check) {
- adapter->idle_check = false;
- if (!e1000e_has_link(adapter))
- pm_schedule_suspend(dev, MSEC_PER_SEC);
- }
+ if (netdev->flags & IFF_UP)
+ rc = e1000e_up(adapter);
- return -EBUSY;
+ return rc;
}
-static int e1000_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (netdev->flags & IFF_UP) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+ /* Down the device without resetting the hardware */
+ e1000e_down(adapter, false);
+ }
- adapter->idle_check = !dev->power.runtime_auto;
- return __e1000_resume(pdev);
+ if (__e1000_shutdown(pdev, true)) {
+ e1000e_pm_runtime_resume(dev);
+ return -EBUSY;
+ }
+
+ return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
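Taken together, the reworked runtime-PM callbacks follow the usual shape: the idle callback schedules a delayed suspend only when the link is down and always returns -EBUSY so the core does not suspend synchronously, and runtime suspend quiesces the device without resetting it. A minimal sketch of that idle-callback shape for a hypothetical driver (example_link_is_up() is an assumed helper, not an e1000e function):

#include <linux/pm_runtime.h>

static bool example_link_is_up(struct device *dev);	/* assumed helper */

/* Runtime-idle callback sketch: queue a delayed autosuspend when the
 * link is down, and refuse an immediate suspend either way. */
static int example_runtime_idle(struct device *dev)
{
	if (!example_link_is_up(dev))
		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);

	return -EBUSY;	/* the delayed suspend above may still fire later */
}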
static void e1000_shutdown(struct pci_dev *pdev)
{
+ e1000e_pm_freeze(&pdev->dev);
+
__e1000_shutdown(pdev, false);
}
@@ -6338,7 +6427,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
pci_disable_device(pdev);
	/* Request a slot reset. */
@@ -6350,7 +6439,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
+ * resembles the first-half of the e1000e_pm_resume routine.
*/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
@@ -6397,7 +6486,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
+ * second-half of the e1000e_pm_resume routine.
*/
static void e1000_io_resume(struct pci_dev *pdev)
{
@@ -6902,9 +6991,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- if (!(netdev->flags & IFF_UP))
- e1000_power_down_phy(adapter);
-
/* Don't lie to e1000_close() down the road. */
if (!down)
clear_bit(__E1000_DOWN, &adapter->state);
@@ -7026,9 +7112,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
static const struct dev_pm_ops e1000_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
- e1000_idle)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = e1000e_pm_suspend,
+ .resume = e1000e_pm_resume,
+ .freeze = e1000e_pm_freeze,
+ .thaw = e1000e_pm_thaw,
+ .poweroff = e1000e_pm_suspend,
+ .restore = e1000e_pm_resume,
+#endif
+ SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7055,7 +7148,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index d70a03906ac0..a9a976f04bff 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 45fc69561627..342bf69efab5 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index c16bd75b6caa..d0ac0f3249c8 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
+ case 2:
+ dev_info(&adapter->pdev->dev,
+ "%s Invalid mode - setting default\n",
+ opt.name);
+ adapter->itr_setting = opt.def;
+ /* fall-through */
case 3:
dev_info(&adapter->pdev->dev,
"%s set to dynamic conservative mode\n",
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 20e71f4ca426..00b3fc98bf30 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index f4f71b9991e3..3841bccf058c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 065f8c80d4f2..fb1a914a3ad4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
bool neg_adj = false;
+ unsigned long flags;
u64 adjustment;
u32 timinca, incvalue;
s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (ret_val)
return ret_val;
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
return 0;
}
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index a7e6a3e37257..ea235bbe50d3 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 72dae4d97b43..beb7b4393a6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -86,12 +86,12 @@
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 8
-#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x30
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
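With the new shift and mask the version word packs a 4-bit major in bits 15:12 and an 8-bit minor in bits 7:0 (previously the major was taken from bits 15:8). A worked example with a hypothetical register value:

/* Hypothetical value, not read from hardware:
 *   hw->nvm.version = 0x2040
 *   major = (0x2040 & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT = 0x2
 *   minor = (0x2040 & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT = 0x40
 * which matches the I40E_CURRENT_NVM_VERSION_HI/LO defaults above.
 */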
@@ -136,6 +136,7 @@ enum i40e_state_t {
__I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
+ __I40E_BAD_EEPROM,
};
enum i40e_interrupt_policy {
@@ -152,8 +153,21 @@ struct i40e_lump_tracking {
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
-#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
-struct i40e_fdir_data {
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
+#define I40E_FDIR_BUFFER_FULL_MARGIN 10
+#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+
+struct i40e_fdir_filter {
+ struct hlist_node fdir_node;
+	/* filter input set */
+ u8 flow_type;
+ u8 ip4_proto;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 sctp_v_tag;
+ /* filter control */
u16 q_index;
u8 flex_off;
u8 pctype;
@@ -162,7 +176,6 @@ struct i40e_fdir_data {
u8 fd_status;
u16 cnt_index;
u32 fd_id;
- u8 *raw_packet;
};
#define I40E_ETH_P_LLDP 0x88cc
@@ -196,7 +209,7 @@ struct i40e_pf {
bool fc_autoneg_status;
u16 eeprom_version;
- u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
@@ -210,6 +223,9 @@ struct i40e_pf {
u8 atr_sample_rate;
bool wol_en;
+ struct hlist_head fdir_filter_list;
+ u16 fdir_pf_active_filters;
+
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_vxlan_bitmap;
@@ -251,6 +267,9 @@ struct i40e_pf {
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
+ /* tracks features that get auto disabled by errors */
+ u64 auto_disable_flags;
+
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -477,10 +496,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
"f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
- >> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
- >> I40E_NVM_VERSION_LO_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return buf;
@@ -534,9 +553,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
-
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+int i40e_get_current_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -575,6 +598,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3479ae..ed3902bf249b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
ntc++;
if (ntc == asq->count)
ntc = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e7f38b57834d..922cdcc45c54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
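The 256-entry table that closes above lets the Rx cleanup path decode the 8-bit packet type reported in the Rx descriptor with a single array lookup, with unused indices left as zeroed entries. A minimal userspace sketch of that pattern follows; the struct fields and the index-153 values are simplified stand-ins, not the driver's real i40e_rx_ptype_decoded layout.

/* Illustrative sketch only: simplified fields, not the driver's real
 * i40e_rx_ptype_decoded layout. It shows the O(1) decode a 256-entry
 * table such as the one above makes possible.
 */
#include <stdint.h>
#include <stdio.h>

struct ptype_decoded {
	uint8_t known;      /* 1 if the index maps to a real packet type */
	uint8_t outer_ip;   /* 0 = none, 4 = IPv4, 6 = IPv6 */
	uint8_t inner_prot; /* placeholder protocol code */
};

/* Unused indices stay all-zero, mirroring I40E_PTT_UNUSED_ENTRY(). */
static const struct ptype_decoded ptype_table[256] = {
	[153] = { .known = 1, .outer_ip = 6, .inner_prot = 1 },
};

int main(void)
{
	uint8_t hw_ptype = 153; /* value reported in the Rx descriptor */
	struct ptype_decoded d = ptype_table[hw_ptype];

	printf("known=%u outer_ip=%u inner_prot=%u\n",
	       (unsigned)d.known, (unsigned)d.outer_ip, (unsigned)d.inner_prot);
	return 0;
}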
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -1409,9 +1775,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ p = &hw->dev_caps;
else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ p = &hw->func_caps;
else
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 50730141bb7b..036570d76176 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
u16 type;
u16 length;
u16 typelength;
+ u16 offset = 0;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
- while (tlv) {
+ while (1) {
typelength = ntohs(tlv->typelength);
type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
I40E_LLDP_TLV_TYPE_SHIFT);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
- if (type == I40E_TLV_TYPE_END)
- break;/* END TLV break out */
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
switch (type) {
case I40E_TLV_TYPE_ORG:
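The dcb.c change above replaces the open-ended while (tlv) walk with a running offset that stops at the END TLV or once the offset passes the LLDPDU size, so a malformed MIB cannot walk past the buffer. A self-contained sketch of the same guard is below; the PDU size, the type/length split and the helper names are illustrative, not taken from the driver.

/* Hedged sketch: a generic TLV walk with the same "stop at END TLV or
 * when the running offset passes the PDU size" guard the patch adds.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define PDU_SIZE 512
#define TLV_END  0

static void walk_tlvs(const uint8_t *pdu, size_t pdu_len)
{
	size_t offset = 0;

	while (offset + 2 <= pdu_len) {
		uint16_t typelength;
		uint16_t type, length;

		memcpy(&typelength, pdu + offset, sizeof(typelength));
		typelength = ntohs(typelength);

		type   = typelength >> 9;    /* top 7 bits of the TLV header */
		length = typelength & 0x1ff; /* low 9 bits of the TLV header */

		offset += sizeof(typelength) + length;

		/* END TLV or walked beyond the PDU: stop. */
		if (type == TLV_END || offset > pdu_len)
			break;

		printf("TLV type=%u length=%u\n",
		       (unsigned)type, (unsigned)length);
	}
}

int main(void)
{
	uint8_t pdu[PDU_SIZE] = { 0 }; /* all-zero buffer decodes as END TLV */

	walk_tlvs(pdu, sizeof(pdu));
	return 0;
}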
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da22c3fa2c00..3c37386fd138 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
**/
static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
{
- if (enable)
+ if (enable) {
pf->flags |= flag;
- else
+ } else {
pf->flags &= ~flag;
+ pf->auto_disable_flags |= flag;
+ }
dev_info(&pf->pdev->dev, "requesting a pf reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->msg_enable);
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
- dev_info(&pf->pdev->dev, "forcing PFR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing CoreR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing GlobR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
- dev_info(&pf->pdev->dev, "forcing EMPR\n");
+ dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1663,28 +1665,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_data fd_data;
+ struct i40e_fdir_filter fd_data;
u16 packet_len, i, j = 0;
char *asc_packet;
+ u8 *raw_packet;
bool add = false;
int ret;
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
+
+ if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
+
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!asc_packet)
goto command_write_done;
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+ GFP_KERNEL);
- if (!fd_data.raw_packet) {
+ if (!raw_packet) {
kfree(asc_packet);
asc_packet = NULL;
goto command_write_done;
}
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
cnt = sscanf(&cmd_buf[13],
"%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
@@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt);
kfree(asc_packet);
asc_packet = NULL;
- kfree(fd_data.raw_packet);
+ kfree(raw_packet);
goto command_write_done;
}
/* fix packet length if user entered 0 */
if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
/* make sure to check the max as well */
packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
- &fd_data.raw_packet[i]);
+ &raw_packet[i]);
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
print_hex_dump(KERN_INFO, "FD raw packet: ",
DUMP_PREFIX_OFFSET, 16, 1,
- fd_data.raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ raw_packet, packet_len, true);
+ ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
} else {
dev_info(&pf->pdev->dev,
"Filter command send failed %d\n", ret);
}
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
+ kfree(raw_packet);
+ raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
@@ -2077,9 +2087,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"tx_timeout: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+ dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "tx_timeout called\n");
@@ -2098,9 +2112,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"change_mtu: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
mtu);
rtnl_unlock();
@@ -2119,9 +2134,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"set_rx_mode: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "set_rx_mode called\n");
@@ -2139,11 +2155,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
vsi_seid);
- goto netdev_ops_write_done;
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+ vsi_seid);
+ } else {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i]->napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
}
- for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i]->napi);
- dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b1d7d8c5cb9b..03d99cbc5c25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_crc_errors),
};
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd);
+
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they are separate. This device supports Virtualization, and
* as such might have several netdevs supporting VMDq and FCoE going
@@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("tx_timeout", tx_timeout_count),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ /* LPI stats */
+ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
#define I40E_QUEUE_STATS_LEN(n) \
@@ -387,7 +396,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
ret_val = i40e_aq_read_nvm(hw, 0x0,
eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
len,
- (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
if (ret_val) {
dev_info(&pf->pdev->dev,
@@ -399,7 +408,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
release_nvm:
i40e_release_nvm(hw);
- memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+ memcpy(bytes, eeprom_buff, eeprom->len);
free_buff:
kfree(eeprom_buff);
return ret_val;
@@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
/* process Tx ring statistics */
do {
- start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
- start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
data[i + 2] = rx_ring->stats.packets;
data[i + 3] = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
}
rcu_read_unlock();
if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1112,6 +1121,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
}
/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success or -EMSGSIZE if there are more rules than will
+ * fit in @rule_locs
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->fd_id;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
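i40e_get_ethtool_fdir_all above fills the caller-supplied rule_locs array and returns -EMSGSIZE as soon as the caller's rule_cnt is exhausted, reporting the number actually copied otherwise. A standalone sketch of that contract, with invented names and data:

#include <errno.h>
#include <stdio.h>

static const unsigned int active_rule_ids[] = { 3, 7, 42 };

static int copy_rule_locations(unsigned int *locs, unsigned int max_rules,
			       unsigned int *rule_cnt)
{
	const unsigned int n = sizeof(active_rule_ids) / sizeof(active_rule_ids[0]);
	unsigned int cnt = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (cnt == max_rules)
			return -EMSGSIZE; /* caller's array is too small */
		locs[cnt++] = active_rule_ids[i];
	}

	*rule_cnt = cnt; /* tell the caller how many slots were used */
	return 0;
}

int main(void)
{
	unsigned int locs[8];
	unsigned int cnt = 0;

	if (!copy_rule_locations(locs, 8, &cnt))
		printf("copied %u rule locations\n", cnt);
	return 0;
}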
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+
+ /* report total rule count */
+ cmd->data = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->fd_id)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->fd_id)
+ return -EINVAL;
+
+ fsp->flow_type = rule->flow_type;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
+ fsp->ring_cookie = rule->q_index;
+
+ return 0;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = 10;
+ cmd->rule_cnt = pf->fdir_pf_active_filters;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
- ret = 0;
+ ret = i40e_get_ethtool_fdir_entry(pf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- cmd->data = 500;
- ret = 0;
+ ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+ break;
default:
break;
}
@@ -1274,289 +1361,182 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return 0;
}
-#define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_match_fdir_input_set - Match a new filter against an existing one
+ * @rule: The filter already added
+ * @input: The new filter to compare against
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns true if the two input sets match
**/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
+ struct i40e_fdir_filter *input)
{
- struct i40e_pf *pf = vsi->back;
- struct udphdr *udp;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- udp->source = fsp->h_u.tcp_ip4_spec.psrc;
- udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ if ((rule->dst_ip[0] != input->dst_ip[0]) ||
+ (rule->src_ip[0] != input->src_ip[0]) ||
+ (rule->dst_port != input->dst_port) ||
+ (rule->src_port != input->src_port))
+ return false;
+ return true;
}
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
*
- * Returns 0 if the filters were successfully added or removed
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
+ *
+ * Returns 0 on success
**/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input,
+ u16 sw_idx,
+ struct ethtool_rxnfc *cmd)
{
+ struct i40e_fdir_filter *rule, *parent;
struct i40e_pf *pf = vsi->back;
- struct tcphdr *tcp;
- struct iphdr *ip;
- bool err = false;
- int ret;
- /* Dummy packet */
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
- if (add) {
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
- }
+ struct hlist_node *node2;
+ int err = -EINVAL;
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
+ parent = NULL;
+ rule = NULL;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->fd_id >= sw_idx)
+ break;
+ parent = rule;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-
- ret = i40e_program_fdir_filter(fd_data, pf, add);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->fd_id == sw_idx)) {
+ if (input && !i40e_match_fdir_input_set(rule, input))
+ err = i40e_add_del_fdir(vsi, rule, false);
+ else if (!input)
+ err = i40e_add_del_fdir(vsi, rule, false);
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ pf->fdir_pf_active_filters--;
}
- return err ? -EOPNOTSUPP : 0;
-}
+ /* If no input, this was a delete; err should be 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL
+ */
+ if (!input)
+ return err;
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
-{
- return -EOPNOTSUPP;
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &pf->fdir_filter_list);
+
+ /* update counts */
+ pf->fdir_pf_active_filters++;
+
+ return 0;
}
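The update helper above walks the PF's filter list to find the insertion point (parent), removes any rule already occupying the requested index, and links the new one in so the list stays ordered by fd_id. A simplified sketch of that sorted insert-or-replace, using a plain singly linked list and invented names rather than the kernel hlist API:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int id;
	struct rule *next;
};

/* Insert @id keeping the list sorted by id; an existing entry is left alone. */
static void insert_rule(struct rule **head, unsigned int id)
{
	struct rule **pos = head;
	struct rule *n;

	while (*pos && (*pos)->id < id)
		pos = &(*pos)->next; /* find the first node with id >= @id */

	if (*pos && (*pos)->id == id)
		return; /* index already occupied: treat as a no-op here */

	n = malloc(sizeof(*n));
	if (!n)
		return;
	n->id = id;
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	struct rule *head = NULL;

	insert_rule(&head, 7);
	insert_rule(&head, 3);
	insert_rule(&head, 7); /* duplicate is a no-op */

	while (head) {
		struct rule *next = head->next;

		printf("rule %u\n", head->id);
		free(head);
		head = next;
	}
	return 0;
}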
-#define I40E_IP_DUMMY_PACKET_LEN 34
/**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @fsp: the ethtool flow spec
- * @add: true adds a filter, false removes it
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
*
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ */
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
struct i40e_pf *pf = vsi->back;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
- char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
- memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ int ret = 0;
- ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
- ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+ ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
- for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ i40e_fdir_check_and_reenable(pf);
+ return ret;
}
/**
- * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
- * a specific flow spec based on their protocol
+ * i40e_add_fdir_ethtool - Add Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
- * @add: true adds a filter, false removes it
*
- * Returns 0 if the filters were successfully added or removed
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol. Returns 0 if the filters were successfully added.
**/
-static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
- struct ethtool_rxnfc *cmd, bool add)
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
- struct i40e_fdir_data fd_data;
- int ret = -EINVAL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct i40e_fdir_filter *input;
struct i40e_pf *pf;
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int ret = -EINVAL;
if (!vsi)
return -EINVAL;
pf = vsi->back;
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= vsi->num_queue_pairs))
- return -EINVAL;
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return -EOPNOTSUPP;
- /* Populate the Flow Director that we have at the moment
- * and allocate the raw packet buffer for the calling functions
- */
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ return -ENOSPC;
- if (!fd_data.raw_packet) {
- dev_info(&pf->pdev->dev, "Could not allocate memory\n");
- return -ENOMEM;
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+ pf->hw.func_caps.fd_filters_guaranteed)) {
+ return -EINVAL;
}
- fd_data.q_index = fsp->ring_cookie;
- fd_data.flex_off = 0;
- fd_data.pctype = 0;
- fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
- fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- fd_data.cnt_index = 0;
- fd_data.fd_id = 0;
+ if (fsp->ring_cookie >= vsi->num_queue_pairs)
+ return -EINVAL;
- switch (fsp->flow_type & ~FLOW_EXT) {
- case TCP_V4_FLOW:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case UDP_V4_FLOW:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- case IPV4_FLOW:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- case IP_USER_FLOW:
- switch (fsp->h_u.usr_ip4_spec.proto) {
- case IPPROTO_TCP:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_UDP:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- default:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- }
- break;
- default:
- dev_info(&pf->pdev->dev, "Could not specify spec type\n");
- ret = -EINVAL;
- }
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
+ input->fd_id = fsp->location;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else
+ input->dest_ctl =
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+
+ input->q_index = fsp->ring_cookie;
+ input->flex_off = 0;
+ input->pctype = 0;
+ input->dest_vsi = vsi->id;
+ input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = 0;
+ input->flow_type = fsp->flow_type;
+ input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
+ input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ kfree(input);
+ else
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
return ret;
}
@@ -1580,10 +1560,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
ret = i40e_set_rss_hash_opt(pf, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ ret = i40e_del_fdir_entry(vsi, cmd);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371ca361..861b722c2672 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,6 +26,7 @@
/* Local includes */
#include "i40e.h"
+#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif
@@ -38,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 30
+#define DRV_VERSION_BUILD 36
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +306,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
break;
}
@@ -375,20 +377,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
continue;
do {
- start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
packets = tx_ring->stats.packets;
bytes = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
rx_ring = &tx_ring[1];
do {
- start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
packets = rx_ring->stats.packets;
bytes = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -739,6 +741,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
+ u32 val;
int i;
u16 q;
@@ -769,10 +772,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
p = ACCESS_ONCE(vsi->tx_rings[q]);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -781,10 +784,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
@@ -971,6 +974,20 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_jabber, &nsd->rx_jabber);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
}
pf->stat_offsets_loaded = true;
@@ -1964,11 +1981,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
- /* If the network stack called us with vid = 0, we should
- * indicate to i40e_vsi_add_vlan() that we want to receive
- * any traffic (i.e. with any vlan tag, or untagged)
+ /* If the network stack called us with vid = 0 then
+ * it is asking to receive priority tagged packets with
+ * vlan id 0. Our HW receives them by default when configured
+ * to receive untagged packets so there is no need to add an
+ * extra filter for vlan 0 tagged packets.
*/
- ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+ if (vid)
+ ret = i40e_vsi_add_vlan(vsi, vid);
if (!ret && (vid < VLAN_N_VID))
set_bit(vid, vsi->active_vlans);
@@ -1981,7 +2001,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
* @netdev: network interface to be adjusted
* @vid: vlan id to be removed
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for removing vlan ids
**/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
@@ -2177,6 +2197,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ /* FDIR VSI tx ring can still use RS bit and writebacks */
+ if (vsi->type != I40E_VSI_FDIR)
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = ring->dma +
+ (ring->count * sizeof(struct i40e_tx_desc));
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2420,6 +2445,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
}
/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays the hlist on the hw where all the SB Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+ struct i40e_fdir_filter *filter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ i40e_add_del_fdir(vsi, filter, true);
+ }
+}
+
+/**
* i40e_vsi_configure - Set up the VSI for action
* @vsi: the VSI being configured
**/
@@ -2557,7 +2604,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the q int */
+ /* Associate the queue pair to the vector and enable the queue int */
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2831,12 +2878,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val == I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER) {
pf->corer_count++;
- else if (val == I40E_RESET_GLOBR)
+ } else if (val == I40E_RESET_GLOBR) {
pf->globr_count++;
- else if (val == I40E_RESET_EMPR)
+ } else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
+ set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ }
}
if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
@@ -2866,8 +2915,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0_remaining);
if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -3107,13 +3155,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
- ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
/* Skip if the queue is already in the requested state */
if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
@@ -3123,8 +3171,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable) {
wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
} else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
}
@@ -3171,12 +3218,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
- ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
if (enable) {
/* is STAT set ? */
@@ -3190,11 +3238,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
else
- rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK);
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
@@ -3732,8 +3778,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "AQ command Config VSI BW allocation per TC failed = %d\n",
+ vsi->back->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4062,6 +4108,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
} else if (vsi->netdev) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
}
+
+ /* replay FDIR SB filters */
+ if (vsi->type == I40E_VSI_FDIR)
+ i40e_fdir_filter_restore(vsi);
i40e_service_event_schedule(pf);
return 0;
@@ -4208,15 +4258,40 @@ static int i40e_open(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- char int_name[IFNAMSIZ];
int err;
- /* disallow open during test */
- if (test_bit(__I40E_TESTING, &pf->state))
+ /* disallow open during test or if eeprom is broken */
+ if (test_bit(__I40E_TESTING, &pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, &pf->state))
return -EBUSY;
netif_carrier_off(netdev);
+ err = i40e_vsi_open(vsi);
+ if (err)
+ return err;
+
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_open - Set up a VSI's resources and bring it up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
/* allocate descriptors */
err = i40e_vsi_setup_tx_resources(vsi);
if (err)
@@ -4229,18 +4304,22 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ if (!vsi->netdev) {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), netdev->name);
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
err = i40e_vsi_request_irq(vsi, int_name);
if (err)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
if (err)
goto err_set_queues;
- err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
if (err)
goto err_set_queues;
@@ -4248,10 +4327,6 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_up_complete;
-#ifdef CONFIG_I40E_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
-
return 0;
err_up_complete:
@@ -4269,6 +4344,26 @@ err_setup_tx:
}
/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to pf
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *filter;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(filter, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ pf->fdir_pf_active_filters = 0;
+}
+
+/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -4321,7 +4416,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the warning interrupt will deal with the shutdown
* and recovery of the switch setup.
*/
- dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4332,7 +4427,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
*
* Same as Global Reset, except does *not* include the MAC/PHY
*/
- dev_info(&pf->pdev->dev, "CoreR requested\n");
+ dev_dbg(&pf->pdev->dev, "CoreR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_CORER_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4366,7 +4461,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* the switch, since we need to do all the recovery as
* for the Core Reset.
*/
- dev_info(&pf->pdev->dev, "PFR requested\n");
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4415,18 +4510,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->etscfg.prioritytable,
sizeof(new_cfg->etscfg.prioritytable))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
- dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
- dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
@@ -4434,7 +4529,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->pfc,
sizeof(new_cfg->pfc))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
@@ -4442,7 +4537,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->app,
sizeof(new_cfg->app))) {
need_reconfig = true;
- dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
return need_reconfig;
@@ -4492,7 +4587,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
- dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
@@ -4550,8 +4645,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_vf *vf;
u16 vf_id;
- dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
- __func__, queue, qtx_ctl);
+ dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ queue, qtx_ctl);
/* Queue belongs to VF, find the VF and issue VF reset */
if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4581,6 +4676,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
}
/**
+ * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * @pf: board private structure
+ **/
+int i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+ int val, fcnt_prog;
+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+ ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+ u32 fcnt_prog, fcnt_avail;
+
+ /* Check if FD SB or ATR was auto disabled and if there is enough room
+ * to re-enable
+ */
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ }
+ }
+ /* Wait for some more space to be available to turn on ATR */
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ }
+ }
+}
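i40e_fdir_check_and_reenable above only re-enables a feature that was auto-disabled once the number of programmed filters drops a fixed headroom below the table capacity (with ATR waiting for twice that headroom). A minimal sketch of the single-flag version of that check, with invented names and thresholds:

#include <stdbool.h>
#include <stdio.h>

#define HEADROOM 32u

struct fd_state {
	unsigned int programmed; /* filters currently in the hardware table */
	unsigned int capacity;   /* total filters the table can hold */
	bool auto_disabled;      /* feature turned off because the table filled */
};

static void check_and_reenable(struct fd_state *fd)
{
	if (!fd->auto_disabled)
		return; /* nothing to recover */

	if (fd->programmed < fd->capacity - HEADROOM) {
		fd->auto_disabled = false; /* room again: re-enable */
		printf("sideband filters re-enabled\n");
	}
}

int main(void)
{
	struct fd_state fd = { .programmed = 100, .capacity = 512,
			       .auto_disabled = true };

	check_and_reenable(&fd);
	return 0;
}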
+
+/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
**/
@@ -4589,11 +4732,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
return;
- pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
-
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;
+ i40e_fdir_check_and_reenable(pf);
+
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}
/**
@@ -4903,7 +5049,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
event.msg_size);
break;
case i40e_aqc_opc_lldp_update_mib:
- dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
rtnl_lock();
ret = i40e_handle_lldp_event(pf, &event);
@@ -4911,7 +5057,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
- dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
case i40e_aqc_opc_send_msg_to_peer:
@@ -4936,6 +5082,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ /* retry in case of garbage read */
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+ err);
+ set_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+ }
+
+ if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+ clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+}
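i40e_verify_eeprom above retries the diagnostic once before marking the EEPROM bad, on the assumption that a single failure may be a garbage read. The retry-once shape, sketched with a stand-in check_device() rather than the real i40e_diag_eeprom_test():

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int check_device(void)
{
	/* Pretend the first read is garbage and the second one is clean. */
	return ++attempts == 1 ? -1 : 0;
}

int main(void)
{
	bool bad = false;
	int err = check_device();

	if (err) {
		err = check_device(); /* retry once before giving up */
		if (err)
			bad = true;
	}

	printf("device %s\n", bad ? "marked bad" : "ok");
	return 0;
}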
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
@@ -5053,6 +5224,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
/* increment MSI-X count because current FW skips one */
pf->hw.func_caps.num_msix_vectors++;
+ if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
+ (pf->hw.aq.fw_maj_ver < 2)) {
+ pf->hw.func_caps.num_msix_vectors++;
+ pf->hw.func_caps.num_msix_vectors_vf++;
+ }
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -5132,9 +5309,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
return;
err_up_complete:
@@ -5157,6 +5334,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
{
int i;
+ i40e_fdir_filter_exit(pf);
for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
@@ -5181,7 +5359,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return 0;
- dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
if (i40e_check_asq_alive(hw))
i40e_vc_notify_reset(pf);
@@ -5228,7 +5406,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (test_bit(__I40E_DOWN, &pf->state))
goto end_core_reset;
- dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
@@ -5237,6 +5415,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
}
+ /* re-verify the eeprom if we just had an EMP reset */
+ if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
+ clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ i40e_verify_eeprom(pf);
+ }
+
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5278,7 +5462,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* try to recover minimal use by getting the basic PF VSI working.
*/
if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
- dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
@@ -5331,6 +5515,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
+ if (pf->num_alloc_vfs) {
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_reset_vf(&pf->vf[v], true);
+ }
+
/* tell the firmware that we're starting */
dv.major_version = DRV_VERSION_MAJOR;
dv.minor_version = DRV_VERSION_MINOR;
@@ -5338,7 +5527,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
dv.subbuild_version = 0;
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
- dev_info(&pf->pdev->dev, "PF reset done\n");
+ dev_info(&pf->pdev->dev, "reset complete\n");
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5387,7 +5576,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
>> I40E_GL_MDET_TX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -5401,7 +5590,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
>> I40E_GL_MDET_RX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -5850,37 +6039,16 @@ err_out:
**/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
- int err = 0;
-
- pf->num_msix_entries = 0;
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- pf->num_msix_entries = vectors;
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- } else {
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
- }
-
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
+ "MSI-X vector reservation failed: %d\n", vectors);
vectors = 0;
}
+ pf->num_msix_entries = vectors;
+
return vectors;
}
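The hunk above is part of the tree-wide pci_enable_msix_range() conversion: instead of looping on pci_enable_msix() and retrying with the hint it returns, the driver asks for any vector count between I40E_MIN_MSIX and the wanted maximum and gets back either the granted count or a negative errno. A sketch of that contract with a made-up reserve_range() stand-in (not the PCI API itself):

#include <errno.h>
#include <stdio.h>

static int hw_available = 12; /* pretend the platform can grant 12 vectors */

static int reserve_range(int min, int max)
{
	if (hw_available < min)
		return -ENOSPC; /* cannot even meet the minimum */
	return hw_available < max ? hw_available : max;
}

int main(void)
{
	int vectors = reserve_range(2 /* min */, 64 /* wanted */);

	if (vectors < 0) {
		printf("reservation failed: %d\n", vectors);
		vectors = 0;
	}
	printf("using %d vectors\n", vectors);
	return 0;
}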
@@ -5942,7 +6110,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
@@ -5978,13 +6146,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/**
- * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -6010,13 +6178,13 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
}
/**
- * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
-static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
@@ -6031,7 +6199,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
return -EINVAL;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
}
@@ -6071,7 +6239,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
- dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6248,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
- dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6275,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_intr, 0, pf->misc_int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
- "request_irq for msix_misc failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ pf->misc_int_name, err);
return -EFAULT;
}
}
@@ -6258,15 +6427,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director Side Band mode Enabled\n");
} else {
dev_info(&pf->pdev->dev,
- "Flow Director Side Band mode Disabled in MFP mode\n");
+ "Flow Director Sideband mode Disabled in MFP mode\n");
}
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
@@ -6287,9 +6452,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
- dev_info(&pf->pdev->dev,
- "Number of VFs being requested for PF[%d] = %d\n",
- pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -6326,6 +6488,39 @@ sw_init_done:
}
/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * Returns a bool to indicate if a reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+ bool need_reset = false;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (features & NETIF_F_NTUPLE) {
+ /* Enable filters and mark for reset */
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ need_reset = true;
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ } else {
+ /* turn off filters, mark for reset and clear SW filter list */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ need_reset = true;
+ i40e_fdir_filter_exit(pf);
+ }
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ /* if ATR was disabled it can be re-enabled. */
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ }
+ return need_reset;
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -6335,12 +6530,19 @@ static int i40e_set_features(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ bool need_reset;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
i40e_vlan_stripping_disable(vsi);
+ need_reset = i40e_set_ntuple(pf, features);
+
+ if (need_reset)
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
return 0;
}
@@ -6464,6 +6666,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+ .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -6495,10 +6698,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO |
- NETIF_F_SG;
+ NETIF_F_TSO;
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -6512,6 +6714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
+ NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
@@ -6771,8 +6974,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
if (vsi->netdev) {
/* results in a call to i40e_close() */
unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
}
} else {
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6791,6 +6992,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
i40e_vsi_clear_rings(vsi);
i40e_vsi_clear(vsi);
@@ -6845,13 +7050,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
}
if (vsi->base_vector) {
- dev_info(&pf->pdev->dev,
- "VSI %d has non-zero base vector %d\n",
+ dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
vsi->seid, vsi->base_vector);
return -EEXIST;
}
- ret = i40e_alloc_q_vectors(vsi);
+ ret = i40e_vsi_alloc_q_vectors(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"failed to allocate %d q_vector for VSI %d, ret=%d\n",
@@ -6865,7 +7069,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
- "failed to get q tracking for VSI %d, err=%d\n",
+ "failed to get queue tracking for VSI %d, err=%d\n",
vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
@@ -7822,6 +8026,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
return 0;
}
+#define INFO_STRING_LEN 255
+static void i40e_print_features(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ char *buf, *string;
+
+ string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!string) {
+ dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+ return;
+ }
+
+ buf = string;
+
+ buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+ buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+#endif
+ buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ buf += sprintf(buf, "RSS ");
+ buf += sprintf(buf, "FDir ");
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ buf += sprintf(buf, "ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+ buf += sprintf(buf, "NTUPLE ");
+ if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ buf += sprintf(buf, "DCB ");
+ if (pf->flags & I40E_FLAG_PTP)
+ buf += sprintf(buf, "PTP ");
+
+ BUG_ON(buf > (string + INFO_STRING_LEN));
+ dev_info(&pf->pdev->dev, "%s\n", string);
+ kfree(string);
+}
+
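
The i40e_print_features() helper added above accumulates the summary line with repeated sprintf() calls into a fixed 255-byte buffer and only checks for overrun after the fact with BUG_ON. A minimal standalone sketch of a bounds-checked variant of the same pattern is shown here for illustration only; the flag names and values below are made up for the example and are not the driver's I40E_FLAG_* bits. (In kernel code, scnprintf() gives this clamping behaviour directly.)

    #include <stdio.h>

    #define INFO_STRING_LEN 255

    /* illustrative flag bits only; not the driver's real I40E_FLAG_* values */
    #define DEMO_FLAG_RSS (1u << 0)
    #define DEMO_FLAG_ATR (1u << 1)
    #define DEMO_FLAG_DCB (1u << 2)

    /* append s to buf, never writing past size; returns the new used length */
    static size_t demo_append(char *buf, size_t size, size_t len, const char *s)
    {
        if (len >= size)
            return len;     /* buffer already full, silently truncate */
        return len + snprintf(buf + len, size - len, "%s", s);
    }

    static void demo_print_features(unsigned int flags, int pf_id)
    {
        char string[INFO_STRING_LEN];
        size_t len;

        len = snprintf(string, sizeof(string), "Features: PF-id[%d] ", pf_id);
        if (flags & DEMO_FLAG_RSS)
            len = demo_append(string, sizeof(string), len, "RSS ");
        if (flags & DEMO_FLAG_ATR)
            len = demo_append(string, sizeof(string), len, "ATR ");
        if (flags & DEMO_FLAG_DCB)
            len = demo_append(string, sizeof(string), len, "DCB ");

        printf("%s\n", string);
    }

    int main(void)
    {
        demo_print_features(DEMO_FLAG_RSS | DEMO_FLAG_DCB, 0);
        return 0;
    }
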
/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -7848,17 +8090,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
/* set up for high or low dma */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* coherent mask for the same size will always succeed if
- * dma_set_mask does
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
- err = -EIO;
- goto err_dma;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
}
/* set up pci connections */
@@ -7946,13 +8185,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
- if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
- >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
- dev_info(&pdev->dev,
- "warning: NVM version not supported, supported version: %02x.%02x\n",
- I40E_CURRENT_NVM_VERSION_HI,
- I40E_CURRENT_NVM_VERSION_LO);
- }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7961,6 +8193,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_verify_eeprom(pf);
+
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
@@ -8062,7 +8296,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
u32 val;
/* disable link interrupts for VFs */
@@ -8070,6 +8305,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
}
pfs_found++;
@@ -8092,7 +8337,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_set_pci_config_data(hw, link_status);
- dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ dev_info(&pdev->dev, "PCI-Express: %s %s\n",
(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8109,6 +8354,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
+ /* print a string summarizing features */
+ i40e_print_features(pf);
+
return 0;
/* Unwind what we've done if something failed in the setup */
@@ -8165,16 +8413,16 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 73f95b081927..262bdf11d221 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -27,14 +27,14 @@
#include "i40e_prototype.h"
/**
- * i40e_init_nvm_ops - Initialize NVM function pointers.
- * @hw: pointer to the HW structure.
+ * i40e_init_nvm - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
*
- * Setups the function pointers and the NVM info structure. Should be called
- * once per NVM initialization, e.g. inside the i40e_init_shared_code().
- * Please notice that the NVM term is used here (& in all methods covered
- * in this file) as an equivalent of the FLASH part mapped into the SR.
- * We are accessing FLASH always thru the Shadow RAM.
+ * Set up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Please notice that the NVM term is used here (& in all methods covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * We are accessing FLASH always thru the Shadow RAM.
**/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
@@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
gens = rd32(hw, I40E_GLNVM_GENS);
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
- /* Switching to words (sr_size contains power of 2KB). */
+ /* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
- /* Check if we are in the normal or blank NVM programming mode. */
+ /* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
- if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
- /* Max NVM timeout. */
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
nvm->timeout = I40E_MAX_NVM_TIMEOUT;
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode. */
+ } else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
}
/**
- * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
- * @hw: pointer to the HW structure.
- * @access: NVM access type (read or write).
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
*
- * This function will request NVM ownership for reading
- * via the proper Admin Command.
+ * This function will request NVM ownership for reading
+ * via the proper Admin Command.
**/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access)
@@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0, &time, NULL);
- /* Reading the Global Device Timer. */
+ /* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
- /* Store the timeout. */
+ /* Store the timeout */
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
if (ret_code) {
- /* Set the polling timeout. */
+ /* Set the polling timeout */
if (time > I40E_MAX_NVM_TIMEOUT)
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ gtime;
else
timeout = hw->nvm.hw_semaphore_timeout;
- /* Poll until the current NVM owner timeouts. */
+ /* Poll until the current NVM owner times out */
while (gtime < timeout) {
usleep_range(10000, 20000);
ret_code = i40e_aq_request_resource(hw,
@@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
}
/**
- * i40e_release_nvm - Generic request for releasing the NVM ownership.
- * @hw: pointer to the HW structure.
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
*
- * This function will release NVM resource via the proper Admin Command.
+ * This function will release NVM resource via the proper Admin Command.
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
@@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
}
/**
- * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
- * @hw: pointer to the HW structure.
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
*
- * Polls the SRCTL Shadow RAM register done bit.
+ * Polls the SRCTL Shadow RAM register done bit.
**/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
- /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
srctl = rd32(hw, I40E_GLNVM_SRCTL);
if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
@@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
goto read_nvm_exit;
}
- /* Poll the done bit first. */
+ /* Poll the done bit first */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
- /* Write the address and start reading. */
+ /* Write the address and start reading */
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
(1 << I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
- /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -210,16 +210,15 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: number of words to read (in) &
- * number of words read before the NVM ownership timeout (out).
- * @data: words read from the Shadow RAM.
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_word()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
**/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
@@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
i40e_status ret_code = 0;
u16 index, word;
- /* Loop thru the selected region. */
+ /* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
ret_code = i40e_read_nvm_word(hw, index, &data[word]);
@@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
break;
}
- /* Update the number of words read from the Shadow RAM. */
+ /* Update the number of words read from the Shadow RAM */
*words = word;
return ret_code;
}
/**
- * i40e_calc_nvm_checksum - Calculates and returns the checksum
- * @hw: pointer to hardware structure
- * @checksum: pointer to the checksum
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
- * This function calculate SW Checksum that covers the whole 64kB shadow RAM
- * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
- * is customer specific and unknown. Therefore, this function skips all maximum
- * possible size of VPD (1kB).
+ * This function calculates SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips all maximum
+ * possible size of VPD (1kB).
**/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
@@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit:
}
/**
- * i40e_validate_nvm_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum: calculated checksum
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
*
- * Performs checksum calculation and validates the NVM SW checksum. If the
- * caller does not need checksum, the value can be NULL.
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need checksum, the value can be NULL.
**/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ed91f93ede2b..9cd57e617959 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482b1a7f..0f5d96ad281d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
******************************************************************************/
#include "i40e.h"
+#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
- * @fdir_input: Packet data that will be filter parameters
+ * @fdir_data: Packet data that will be filter parameters
+ * @raw_packet: the pre-allocated packet buffer for FDir
* @pf: The pf pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add)
{
struct i40e_filter_program_desc *fdir_desc;
@@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
- dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ dma = dma_map_single(dev, raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
@@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
/* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
dma_unmap_addr_set(tx_buf, dma, dma);
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
/* set the timestamp */
tx_buf->time_stamp = jiffies;
@@ -161,26 +163,329 @@ dma_fail:
return -1;
}
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ udp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ udp->source = fd_data->src_port;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ /* Dummy packet */
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+ 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ tcp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ tcp->source = fd_data->src_port;
+
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Always returns -EOPNOTSUPP
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fd_data->src_ip[0];
+ ip->daddr = fd_data->dst_ip[0];
+ ip->protocol = 0;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: the flow director filter data to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ /* Populate the Flow Director that we have at the moment
+ * and allocate the raw packet buffer for the calling functions
+ */
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ switch (input->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IP_USER_FLOW:
+ switch (input->ip4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input,
+ raw_packet, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, input,
+ raw_packet, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+ input->flow_type);
+ ret = -EINVAL;
+ }
+
+ kfree(raw_packet);
+ return ret;
+}
+
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
- * @qw: the descriptor data
+ * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, u8 prog_id)
{
- struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ struct i40e_pf *pf = rx_ring->vsi->back;
+ struct pci_dev *pdev = pf->pdev;
+ u32 fcnt_prog, fcnt_avail;
u32 error;
+ u64 qw;
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- /* for now just print the Status */
- dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
- prog_id, error);
+ if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+
+ /* filter programming failed most likely due to table full */
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = pf->hw.fdir_shared_filter_count +
+ pf->fdir_pf_filter_count;
+
+ /* If ATR is running fcnt_prog can quickly change,
+ * if we are very close to full, it makes sense to disable
+ * FD ATR/SB and then re-enable it when there is room.
+ */
+ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+ /* Turn off ATR first */
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ }
+ } else {
+ dev_info(&pdev->dev, "FD filter programming error");
+ }
+ } else if (error ==
+ (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+ }
}
/**
@@ -315,6 +620,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
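
i40e_get_head() above reads the Tx head index that the hardware writes back into a u32 placed immediately after the last descriptor of the ring; i40e_setup_tx_descriptors() grows the ring allocation by sizeof(u32) for it further down in this file's diff, and the VF path programs the same offset into head_wb_addr in i40e_virtchnl_pf.c. A small standalone sketch of that layout and lookup, assuming a simplified stand-in descriptor type instead of the real struct i40e_tx_desc and ignoring DMA mapping and endianness (the driver goes through le32_to_cpu()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for struct i40e_tx_desc; only the size/layout matters here */
    struct demo_tx_desc {
        uint64_t buffer_addr;
        uint64_t cmd_type_offset_bsz;
    };

    struct demo_tx_ring {
        struct demo_tx_desc *desc;  /* descriptor ring memory */
        uint16_t count;             /* number of descriptors */
    };

    /* mirrors i40e_get_head(): the head word sits just past desc[count - 1] */
    static uint32_t demo_get_head(const struct demo_tx_ring *ring)
    {
        const void *head = ring->desc + ring->count;

        return *(const volatile uint32_t *)head;
    }

    int main(void)
    {
        struct demo_tx_ring ring = { .count = 512 };
        /* ring size plus one trailing u32, as in i40e_setup_tx_descriptors() */
        size_t size = ring.count * sizeof(*ring.desc) + sizeof(uint32_t);

        ring.desc = calloc(1, size);
        if (!ring.desc)
            return 1;

        /* pretend the hardware reported that it consumed up to descriptor 42 */
        *(uint32_t *)(ring.desc + ring.count) = 42;
        printf("head = %u\n", demo_get_head(&ring));

        free(ring.desc);
        return 0;
    }
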
+/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
* @budget: how many cleans we're allowed
@@ -325,6 +644,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_packets = 0;
unsigned int total_bytes = 0;
@@ -333,6 +653,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -343,9 +665,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* if the descriptor isn't done, no work yet to do */
- if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
break;
/* clear next_to_watch to prevent false hangs */
@@ -577,7 +898,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
- i40e_fd_handle_status(rx_ring, qw, id);
+ i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
@@ -601,6 +922,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ /* add u32 for head writeback, align after this takes care of
+ * guaranteeing this is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -892,7 +1217,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -956,6 +1281,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
}
/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
* i40e_clean_rx_irq - Reclaim resources after receive completes
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -972,8 +1320,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ u8 rx_ptype;
u64 qword;
- u16 rx_ptype;
+
+ if (budget <= 0)
+ return 0;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -1087,7 +1438,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
goto next_desc;
}
- skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1246,8 +1598,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- tx_ring->atr_count++;
-
/* snag network header to get L4 type and address */
hdr.network = skb_network_header(skb);
@@ -1269,8 +1619,17 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
- /* sample on all syn/fin packets or once every atr sample rate */
- if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ /* Due to lack of space, no more new filters can be programmed */
+ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
+ tx_ring->atr_count++;
+
+ /* sample on all syn/fin/rst packets or once every atr sample rate */
+ if (!th->fin &&
+ !th->syn &&
+ !th->rst &&
+ (tx_ring->atr_count < tx_ring->atr_sample_rate))
return;
tx_ring->atr_count = 0;
@@ -1294,7 +1653,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
- dtype_cmd |= th->fin ?
+ dtype_cmd |= (th->fin || th->rst) ?
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1596,7 +1955,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
struct i40e_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
- if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
@@ -1707,9 +2067,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -1812,7 +2186,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
+ * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
@@ -1823,7 +2197,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += skb_shinfo(skb)->nr_frags;
#endif
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
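
With head write-back available, the i40e_tx_map() hunk above only asks for a descriptor write-back (the RS bit) when a packet's last descriptor lands on the fourth slot of its four-descriptor group or when the packet spans groups; otherwise completion is tracked through the head write-back alone, and the descriptor-count check grows its gap to stay clear of the head cache line. A tiny standalone sketch of just that index test, reusing the WB_STRIDE value from the hunk but working on plain integer indices and ignoring ring wrap-around, which the driver handles with tx_bi pointer comparisons:

    #include <stdbool.h>
    #include <stdio.h>

    #define WB_STRIDE 0x3   /* same value as in the hunk: groups of four descriptors */

    /*
     * true when the RS bit would be set on the packet's last descriptor:
     * RS is skipped only when that descriptor is not the fourth of its group
     * and the whole packet fits inside the same group of four.
     */
    static bool demo_needs_rs(unsigned int first, unsigned int last)
    {
        bool same_group = first <= last && first >= (last & ~WB_STRIDE);

        if ((last & WB_STRIDE) != WB_STRIDE && same_group)
            return false;
        return true;
    }

    int main(void)
    {
        /* 0: short packet inside one group of four, head write-back is enough */
        printf("first=0 last=1 -> RS=%d\n", demo_needs_rs(0, 1));
        /* 1: last descriptor lands on the fourth slot of its group */
        printf("first=2 last=3 -> RS=%d\n", demo_needs_rs(2, 3));
        /* 1: packet spans two groups */
        printf("first=2 last=5 -> RS=%d\n", demo_needs_rs(2, 5));
        return 0;
    }
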
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 181a825d3160..71a968fe557f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -91,6 +91,7 @@ enum i40e_debug_mask {
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -458,6 +459,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -698,7 +703,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -1010,6 +1015,11 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* EEE LPI */
+ bool tx_lpi_status;
+ bool rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
};
/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c8ca5a..02c11a7f7d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
- return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- (pf->hw.func_caps.num_msix_vectors_vf
- * vf->vf_id) + (vector_id - 1));
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+ (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
/* Special case - No queues mapped on this vector */
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = info->dma_ring_addr +
+ (info->ring_len * sizeof(struct i40e_tx_desc));
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not allocate VF broadcast filter\n");
}
- if (!f) {
- dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
- ret = -ENOMEM;
- goto error_alloc_vsi_res;
- }
-
/* program mac filter */
ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
+ if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
- goto error_alloc_vsi_res;
- }
error_alloc_vsi_res:
return ret;
@@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
/* disable interrupts so the VF starts in a known state */
for (i = 0; i < msix_vf; i++) {
/* format is same for both registers */
@@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate vf resources to reset the VSI state */
i40e_free_vf_res(vf);
- mdelay(10);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
*
* allocate vf resources
**/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
struct i40e_vf *vfs;
int i, ret = 0;
@@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* Disable interrupt 0 so we don't try to handle the VFLR. */
i40e_irq_dynamic_disable_icr0(pf);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "pci_enable_sriov failed with error %d!\n", ret);
- pf->num_alloc_vfs = 0;
- goto err_iov;
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
}
-
/* allocate memory */
- vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
goto err_alloc;
@@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- i40e_reset_vf(vf, true);
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ i40e_reset_vf(vf, true);
}
}
@@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- pfe.event_data.link_event.link_status =
- pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
-
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ } else {
+ pfe.event_data.link_event.link_status =
+ ls->link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = ls->link_speed;
+ }
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+ vf++;
+ }
}
/**
@@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
error_param:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed =
+ pf->hw.phy.link_info.link_speed;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ pfe.event_data.link_event.link_status = true;
+ pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_out;
+ }
+ /* Notify the VF of its new link state */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+ return ret;
+}
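
The new hook above (wired into i40e_netdev_ops earlier in the series as .ndo_set_vf_link_state) reduces each request to a (link_forced, link_up) pair that i40e_vc_notify_link_state() then reports to the VF; from userspace it is typically exercised with iproute2, e.g. "ip link set DEV vf 0 state disable". A standalone sketch of just that mapping, using local stand-in values in place of the uapi IFLA_VF_LINK_STATE_* constants and a stand-in structure for the two fields added to struct i40e_vf:

    #include <stdbool.h>
    #include <stdio.h>

    /* values mirror IFLA_VF_LINK_STATE_{AUTO,ENABLE,DISABLE}; local stand-ins */
    enum demo_vf_link_state { DEMO_LINK_AUTO, DEMO_LINK_ENABLE, DEMO_LINK_DISABLE };

    /* stand-in for the link_forced/link_up fields added to struct i40e_vf */
    struct demo_vf_link {
        bool link_forced;
        bool link_up;
    };

    /* mirrors the switch in i40e_ndo_set_vf_link_state(); returns 0 or -1 */
    static int demo_set_vf_link_state(struct demo_vf_link *vf, int link)
    {
        switch (link) {
        case DEMO_LINK_AUTO:
            vf->link_forced = false;    /* report the physical link */
            break;
        case DEMO_LINK_ENABLE:
            vf->link_forced = true;     /* always report link up */
            vf->link_up = true;
            break;
        case DEMO_LINK_DISABLE:
            vf->link_forced = true;     /* always report link down */
            vf->link_up = false;
            break;
        default:
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct demo_vf_link vf = { false, false };

        demo_set_vf_link_state(&vf, DEMO_LINK_DISABLE);
        printf("forced=%d up=%d\n", vf.link_forced, vf.link_up);
        return 0;
    }
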
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee36e12..389c47f396d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,10 +98,13 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ bool link_forced;
+ bool link_up; /* only valid if vf link is forced */
};
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
@@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
- __le32 tenant_id ;
+ __le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7b13953b28c4..ae084378faab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
}
+/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40evf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
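
The work-flow comment above translates directly into a couple of branches on the decoded fields; i40e_ptype_to_hash() in the i40e_txrx.c hunk earlier in this series is one real user. A standalone sketch of the same flow, assuming simplified local enums and a simplified structure in place of the real i40e_rx_ptype_decoded and I40E_RX_PTYPE_* values:

    #include <stdio.h>

    /* simplified stand-ins for i40e_rx_ptype_decoded and the I40E_RX_PTYPE_* values */
    enum demo_outer_ip { DEMO_OUTER_NONE, DEMO_OUTER_IP };
    enum demo_payload  { DEMO_PAY_NONE, DEMO_PAY2, DEMO_PAY3, DEMO_PAY4 };

    struct demo_ptype_decoded {
        unsigned int known;
        enum demo_outer_ip outer_ip;
        enum demo_payload payload_layer;
    };

    /* follows the comment's work flow: unknown -> non-IP (L2) -> IP, then look deeper */
    static const char *demo_classify(struct demo_ptype_decoded d)
    {
        if (!d.known)
            return "unknown packet type";
        if (d.outer_ip != DEMO_OUTER_IP)
            return "non-IP: decode via the L2 ptype";
        if (d.payload_layer == DEMO_PAY4)
            return "IP with an L4 payload";
        if (d.payload_layer == DEMO_PAY3)
            return "IP with an L3 payload";
        return "IP: inspect the tunnel/inner-protocol fields";
    }

    int main(void)
    {
        struct demo_ptype_decoded tcp4 = { 1, DEMO_OUTER_IP, DEMO_PAY4 };

        printf("%s\n", demo_classify(tcp4));
        return 0;
    }
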
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
@@ -199,8 +565,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
details.async = true;
cmd_details = &details;
}
- status = i40evf_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
- msglen, cmd_details);
+ status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
return status;
}
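
The lookup table added above is meant to be indexed directly with the 8-bit ptype reported in the Rx descriptor; entries built with I40E_PTT_UNUSED_ENTRY() decode as not-known and are skipped by callers. A minimal sketch of such a consumer, reusing only names that appear elsewhere in this patch (i40evf_ptype_lookup, the known/outer_ip/payload_layer fields and the I40E_RX_PTYPE_* constants used further below), might look like:

	/* sketch only -- not part of the patch */
	static bool sketch_ptype_carries_l4(u8 ptype)
	{
		struct i40e_rx_ptype_decoded d = i40evf_ptype_lookup[ptype];

		if (!d.known)
			return false;	/* an I40E_PTT_UNUSED_ENTRY() slot */

		return d.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		       d.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4;
	}
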
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 7841573a58c9..97ab8c2b76f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40evf_ptype_lookup[ptype];
+}
+
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
#include <linux/prefetch.h>
#include "i40evf.h"
+#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
* @budget: how many cleans we're allowed
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_packets = 0;
unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* if the descriptor isn't done, no work yet to do */
- if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
break;
/* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ /* add u32 for head writeback, align after this takes care of
+ * guaranteeing this is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
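
With the extra u32 appended here, the Tx ring memory is laid out as count descriptors followed by the head write-back word that i40e_get_head() reads. A sketch of the same address arithmetic, spelled out step by step (illustration only, assuming the layout established by this hunk):

	static u32 sketch_read_head_writeback(struct i40e_ring *tx_ring)
	{
		struct i40e_tx_desc *ring = tx_ring->desc;
		/* the write-back word sits one full descriptor past the ring end */
		volatile __le32 *head_wb = (volatile __le32 *)(ring + tx_ring->count);

		return le32_to_cpu(*head_wb);
	}
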
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
}
/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
* i40e_clean_rx_irq - Reclaim resources after receive completes
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ u8 rx_ptype;
u64 qword;
- u16 rx_ptype;
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
goto next_desc;
}
- skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
struct i40e_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
- if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
+ * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += skb_shinfo(skb)->nr_frags;
#endif
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
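
The WB_STRIDE logic above only sets RS on descriptors where a write-back is actually wanted: a packet that both starts and ends inside the same four-descriptor group, and does not end on that group's last slot, gets EOP only and relies on head write-back for completion reporting. Ignoring ring wrap-around, the selection reduces to the predicate below (a sketch, not part of the patch; first_idx/last_idx stand in for the tx_bi indexes compared via pointers above). This is also why i40e_xmit_descriptor_count() now reserves a four-descriptor gap plus one context descriptor.

	static bool sketch_wants_rs_bit(u16 first_idx, u16 last_idx)
	{
		/* RS when the packet ends on the 4th descriptor of a stride group... */
		if ((last_idx & WB_STRIDE) == WB_STRIDE)
			return true;
		/* ...or when it started in an earlier stride group */
		return (first_idx & ~WB_STRIDE) != (last_idx & ~WB_STRIDE);
	}
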
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..4673b3381edd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define ETH_ALEN 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -90,6 +88,7 @@ enum i40e_debug_mask {
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -466,6 +465,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* EEE LPI */
+ bool tx_lpi_status;
+ bool rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
};
/* Checksum and Shadow RAM pointers */
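
The fields consumed in the i40e_txrx.c hunks are packed into the 64-bit qword1 of the write-back descriptor defined here and recovered with mask/shift pairs, as done for rx_status in i40e_clean_rx_irq(). A condensed sketch of the same extraction for the ptype (the I40E_RXD_QW1_PTYPE_* names are assumed to be defined alongside the STATUS macros and are not part of this hunk):

	static u8 sketch_rx_desc_ptype(union i40e_rx_desc *rx_desc)
	{
		u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* PTYPE mask/shift names assumed; the STATUS pair appears in this patch */
		return (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	}
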
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..807807d62387 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/udp.h>
-#include <linux/sctp.h>
-
#include "i40e_type.h"
#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
- __I40EVF_FAILED, /* PF communication failed. Fatal. */
__I40EVF_REMOVE, /* driver is being unloaded */
__I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__I40EVF_INIT_SW, /* got resources, setting up structs */
+ __I40EVF_RESETTING, /* in reset */
/* Below here, watchdog is running */
__I40EVF_DOWN, /* ready, can be opened */
__I40EVF_TESTING, /* in ethtool self-test */
- __I40EVF_RESETTING, /* in reset */
__I40EVF_RUNNING, /* opened, working */
};
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
- struct vlan_group *vlgrp;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
- /* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ char misc_vector_name[IFNAMSIZ + 9];
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
struct list_head mac_filter_list;
-#ifdef DEBUG
- bool detect_tx_hung;
-#endif /* DEBUG */
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
- int txd_count;
- int rxd_count;
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
- u64 non_eop_descs;
int num_msix_vectors;
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
-
- u32 init_state;
- volatile unsigned long flags;
+ u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
+#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- /* structs defined in i40e_vf.h */
- struct i40e_hw hw;
+ struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
volatile unsigned long crit_section;
- u64 tx_busy;
struct work_struct watchdog_task;
bool netdev_registered;
- bool dev_closed;
bool link_up;
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +252,6 @@ struct i40evf_adapter {
u32 aq_wait_count;
};
-struct i40evf_info {
- enum i40e_mac_type mac;
- unsigned int flags;
-};
-
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
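
The reshuffled state enum matters because the driver gates work on ordered comparisons rather than explicit state lists; moving __I40EVF_RESETTING above __I40EVF_DOWN makes a VF in reset count as "not up yet" wherever such a comparison is used. A sketch of the pattern, mirroring the i40evf_main.c hunks later in this patch:

	static bool sketch_vf_not_yet_up(struct i40evf_adapter *adapter)
	{
		/* true for STARTUP, REMOVE, the INIT_* states and RESETTING */
		return adapter->state < __I40EVF_DOWN;
	}
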
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
+ int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->txd_count) &&
- (new_rx_count == adapter->rxd_count))
+ if ((new_tx_count == adapter->tx_rings[0]->count) &&
+ (new_rx_count == adapter->rx_rings[0]->count))
return 0;
- adapter->txd_count = new_tx_count;
- adapter->rxd_count = new_rx_count;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = new_tx_count;
+ adapter->rx_rings[i]->count = new_rx_count;
+ }
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
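
With the adapter-wide txd_count/rxd_count fields gone, the per-ring count members are the only record of the configured sizes. A matching get_ringparam-style read-back (sketch only, assuming ring 0 is representative, which the loop above keeps true) would be:

	static void sketch_get_ring_counts(struct i40evf_adapter *adapter,
					   struct ethtool_ringparam *ring)
	{
		ring->tx_pending = adapter->tx_rings[0]->count;
		ring->rx_pending = adapter->rx_rings[0]->count;
	}
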
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..e35e66ffa782 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.16"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2014 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
}
/**
@@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
int i;
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
for (i = 1; i < adapter->num_msix_vectors; i++) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- sprintf(adapter->name[0], "i40evf:mbx");
+ sprintf(adapter->misc_vector_name, "i40evf:mbx");
err = request_irq(adapter->msix_entries[0].vector,
- &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ &i40evf_msix_aq, 0,
+ adapter->misc_vector_name, netdev);
if (err) {
dev_err(&adapter->pdev->dev,
- "request_irq for msix_aq failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ adapter->misc_vector_name, err);
free_irq(adapter->msix_entries[0].vector, netdev);
}
return err;
@@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
- /* remove all MAC filters from the VSI */
+ /* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- /* disable receives */
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
-
+ /* remove all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->remove = true;
+ }
+ if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __I40EVF_RESETTING) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+ }
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
@@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
* than CPU's. So let's be conservative and only ask for
* (roughly) twice the number of vectors as there are CPU's.
*/
- v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
- v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+ v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter.
@@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- if (adapter->state < __I40EVF_DOWN)
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_info(&adapter->pdev->dev, "Checking for redemption\n");
+ if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __I40EVF_STARTUP;
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ schedule_delayed_work(&adapter->init_task, 10);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
+ }
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if ((adapter->state < __I40EVF_DOWN) ||
+ (adapter->flags & I40EVF_FLAG_RESET_PENDING))
goto watchdog_done;
- /* check for unannounced reset */
- if ((adapter->state != __I40EVF_RESETTING) &&
+ /* check for reset */
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
- __func__);
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_irq_enable(adapter, true);
i40evf_fire_sw_int(adapter, 0xFF);
+
watchdog_done:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
jiffies + msecs_to_jiffies(20));
else
mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
schedule_work(&adapter->adminq_task);
}
+static int next_queue(struct i40evf_adapter *adapter, int j)
+{
+ j += 1;
+
+ return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+}
+
/**
* i40evf_configure_rss - Prepare for RSS if used
* @adapter: board private structure
@@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
- if (j == adapter->vsi_res->num_queue_pairs)
- j = 0;
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << 8) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ j = adapter->vsi_res->num_queue_pairs;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ j = next_queue(adapter, j);
+ lut = j;
+ j = next_queue(adapter, j);
+ lut |= j << 8;
+ j = next_queue(adapter, j);
+ lut |= j << 16;
+ j = next_queue(adapter, j);
+ lut |= j << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
}
i40e_flush(hw);
}
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
/**
* i40evf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
@@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
**/
static void i40evf_reset_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, reset_task);
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ reset_task);
struct i40e_hw *hw = &adapter->hw;
int i = 0, err;
uint32_t rstat_val;
@@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work)
&adapter->crit_section))
udelay(500);
- /* wait until the reset is complete */
- for (i = 0; i < 20; i++) {
+ if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+ dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ i40evf_request_reset(adapter);
+ }
+
+ /* poll until we see the reset actually happen */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_COMPLETED)
+ if (rstat_val != I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset now occurring\n");
break;
- else
- mdelay(100);
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ goto continue_reset; /* act like the reset happened */
+ }
+
+ /* wait until the reset is complete and the PF is responding to us */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+ break;
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
}
- if (i == 20) {
+ if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
- __func__, rstat_val);
- /* carry on anyway */
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ rstat_val);
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev))
+ i40evf_close(adapter->netdev);
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return; /* Do not attempt to reinit. It's dead, Jim. */
}
+
+continue_reset:
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
i40evf_down(adapter);
adapter->state = __I40EVF_RESETTING;
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40e_status ret;
u16 pending;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return;
+
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
int err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+ return -EIO;
+ }
if (adapter->state != __I40EVF_DOWN)
return -EBUSY;
@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (adapter->state <= __I40EVF_DOWN)
+ return 0;
+
/* signal that we are down to the interrupt handler */
adapter->state = __I40EVF_DOWN;
+
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
switch (adapter->state) {
case __I40EVF_STARTUP:
/* driver loaded, probe complete */
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
err = i40e_set_mac_type(hw);
if (err) {
- dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+ err);
goto err;
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
- __func__, err);
+ dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_init_adminq(hw);
if (err) {
- dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ err);
goto err;
}
err = i40evf_send_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
i40evf_shutdown_adminq(hw);
goto err;
}
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
goto restart;
break;
case __I40EVF_INIT_VERSION_CHECK:
- if (!i40evf_asq_done(hw))
+ if (!i40evf_asq_done(hw)) {
+ dev_err(&pdev->dev, "Admin queue command never completed.\n");
goto err;
+ }
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
err);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ dev_err(&pdev->dev, "Unable send config request (%d)\n",
err);
goto err;
}
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
(I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res) {
- dev_err(&pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!adapter->vf_res)
goto err;
- }
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
goto restart;
if (err) {
- dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+ err);
goto err_alloc;
}
adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
}
if (!adapter->vsi_res) {
- dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ dev_err(&pdev->dev, "No LAN VSI found\n");
goto err_alloc;
}
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->txd_count = I40EVF_DEFAULT_TXD;
- adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
-
- netdev->features |= NETIF_F_SG |
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_SCTP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
NETIF_F_GRO;
if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work)
NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* The HW MAC address was set and/or determined in sw_init */
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev,
- "Invalid MAC address %pMAC, using random\n",
- adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work)
netif_carrier_off(netdev);
- strcpy(netdev->name, "eth%d");
-
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
adapter->vsi.back = adapter;
@@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
adapter->vsi.netdev = adapter->netdev;
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ if (!adapter->netdev_registered) {
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
adapter->netdev_registered = true;
@@ -2031,7 +2134,6 @@ err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
i40evf_reset_interrupt_capability(adapter);
- adapter->state = __I40EVF_FAILED;
err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
@@ -2039,9 +2141,7 @@ err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
- if (hw->aq.asq.count)
- i40evf_shutdown_adminq(hw); /* ignore error */
- adapter->state = __I40EVF_FAILED;
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,26 +2184,20 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct i40evf_adapter *adapter = NULL;
struct i40e_hw *hw = NULL;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = true;
- /* coherent mask for the same size will always succeed if
- * dma_set_mask does
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- pci_using_dac = false;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
- __func__, err);
- err = -EIO;
- goto err_dma;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
}
err = pci_request_regions(pdev, i40evf_driver_name);
@@ -2128,8 +2222,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -2271,6 +2363,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40e_hw *hw = &adapter->hw;
cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -2278,17 +2371,15 @@ static void i40evf_remove(struct pci_dev *pdev)
}
adapter->state = __I40EVF_REMOVE;
- if (adapter->num_msix_vectors) {
+ if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
-
- flush_scheduled_work();
-
i40evf_free_misc_irq(adapter);
-
i40evf_reset_interrupt_capability(adapter);
}
+ del_timer_sync(&adapter->watchdog_timer);
+ flush_scheduled_work();
+
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
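
The reset handler above now waits in two phases: first for I40E_VFGEN_RSTAT to leave VFACTIVE (the reset has really started), then for it to return to VFACTIVE (the PF has finished and is reachable again). Condensed into one helper purely for illustration, using only registers and constants that appear in the hunks above:

	static bool sketch_wait_for_rstat(struct i40e_hw *hw, bool want_active)
	{
		int i;

		for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
			u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
				    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

			if ((rstat == I40E_VFR_VFACTIVE) == want_active)
				return true;
			msleep(I40EVF_RESET_WAIT_MS);
		}
		return false;	/* timed out */
	}
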
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+ /* Don't check CURRENT_OP - this is always higher priority */
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
/**
* i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- adapter->state = __I40EVF_RESETTING;
- schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev,
- "%s: hardware reset pending\n", __func__);
+ dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
break;
default:
dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285bb..5bcb2de75933 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
# more details.
#
# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 06df6928f44c..fa36fe12e775 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
{
u16 ret_val = 0;
- if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
ret_val = e1000_82580_rxpbs_table[data];
return ret_val;
@@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
+#ifdef CONFIG_IGB_HWMON
/**
* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Updates the temperatures in mac.thermal_sensor_data
**/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
@@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
@@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
return status;
}
+#endif
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
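
The switch to ARRAY_SIZE() above removes a hand-maintained element-count macro; ARRAY_SIZE(x) expands to sizeof(x)/sizeof((x)[0]) plus a compile-time check that x really is an array, so the bounds check can never drift from the table definition. A standalone sketch of the same bounded lookup:

	static const u16 sketch_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16,
						  35, 70, 140 };

	static u16 sketch_rxpbs_adjust(u32 data)
	{
		if (data < ARRAY_SIZE(sketch_rxpbs_table))
			return sketch_rxpbs_table[data];
		return 0;
	}
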
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 8c2437722aad..09d78be72416 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
s32 igb_set_eee_i350(struct e1000_hw *);
s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 0571b973be80..b05bf925ac72 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -44,7 +43,11 @@
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
+
/* Physical Func Reset Done Indication */
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -191,7 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -529,8 +533,67 @@
#define E1000_TIMINCA_16NS_SHIFT 24
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
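
The TS_SDPx_SEL_* values added above are two-bit fields (the FC1 encoding is the all-ones value for each field), so routing a Tsync signal to a pin means clearing the whole field before OR-ing in the new selection. A sketch for SDP0, operating only on the raw value of whichever register these bits are written to (that register is not named in this hunk):

	static u32 sketch_route_tt0_to_sdp0(u32 tssdp)
	{
		tssdp &= ~TS_SDP0_SEL_FC1;		/* clear the 2-bit select field */
		tssdp |= TS_SDP0_SEL_TT0 | TS_SDP0_EN;	/* target time 0 -> SDP0 */
		return tssdp;
	}
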
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ab99e2b582a8..10741d170f2d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c0393316a3a..db963397cc27 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -35,6 +34,8 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
{
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -306,8 +307,8 @@ out:
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
**/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 flup;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 2d913716573a..907fe99a9813 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -28,17 +27,8 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-s32 igb_update_flash_i210(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-void igb_release_nvm_i210(struct e1000_hw *hw);
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed50670..5910a932ea7c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e4cbe8ef67b3..99299ba8ee3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf7..d5b121771c31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf3..f52f5515e5a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3db914..9abf82919c65 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b7419cb98..5b101170b17e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2b74d95138..4009bbab7407 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
}
/**
- * e1000_write_sfp_data_byte - Writes SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to write to
- * @data: data to write
- *
- * Writes one byte to SFP module data stored
- * in SFP resided EEPROM memory or SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- hw_dbg("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
- /* The programming interface is 16 bits wide
- * so we need to read the whole word first
- * then update appropriate byte lane and write
- * the updated word back.
- */
- /* Set up Op-code, EEPROM Address,in the I2CCMD
- * register. The MAC will take care of interfacing
- * with an EEPROM to write the data given.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
- /* Set a command to read single word */
- wr32(E1000_I2CCMD, i2ccmd);
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- /* Poll the ready bit to see if lastly
- * launched I2C operation completed
- */
- i2ccmd = rd32(E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY) {
- /* Check if this is READ or WRITE phase */
- if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
- E1000_I2CCMD_OPCODE_READ) {
- /* Write the selected byte
- * lane and update whole word
- */
- data_local = i2ccmd & 0xFF00;
- data_local |= data;
- i2ccmd = ((offset <<
- E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE | data_local);
- wr32(E1000_I2CCMD, i2ccmd);
- } else {
- break;
- }
- }
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- return 0;
-}
-
-/**
* igb_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f2095a..4c2c36c46a73 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53af..bdb246e848e1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -41,6 +40,7 @@
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -102,6 +102,14 @@
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
+#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
+#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
@@ -349,16 +357,30 @@
#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
-#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
-#define rd32(reg) (readl(hw->hw_addr + reg))
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ if (!E1000_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
#define wrfl() ((void)rd32(E1000_STATUS))
#define array_wr32(reg, offset, value) \
- (writel(value, hw->hw_addr + reg + ((offset) << 2)))
+ wr32((reg) + ((offset) << 2), (value))
+
#define array_rd32(reg, offset) \
(readl(hw->hw_addr + reg + ((offset) << 2)))
@@ -397,4 +419,6 @@
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+#define E1000_REMOVED(h) unlikely(!(h))
+
#endif
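As a reading aid for the wr32()/rd32() rework above, a minimal sketch (not part of this patch) of what callers see once a surprise-removed adapter starts returning all-ones. E1000_STATUS_LU is assumed from the existing defines; the function name is hypothetical.

#include "igb.h"	/* driver-private header: struct e1000_hw, rd32(), E1000_STATUS, E1000_STATUS_LU */

/* Hypothetical helper: a removed device reads back ~0; igb_rd32() then clears
 * hw->hw_addr so E1000_REMOVED() short-circuits every later wr32()/rd32().
 */
static bool example_link_up(struct e1000_hw *hw)
{
	u32 status = rd32(E1000_STATUS);

	if (status == ~0U)	/* PCIe link lost; treat the link as down */
		return false;

	return !!(status & E1000_STATUS_LU);
}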
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ccf472f073dd..7fbe1e925143 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -42,6 +41,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
struct igb_adapter;
@@ -434,6 +434,7 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
@@ -456,6 +457,7 @@ struct igb_adapter {
unsigned long link_check_timeout;
int copper_tries;
struct e1000_info ei;
+ u16 eee_advert;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -472,6 +474,7 @@ struct igb_adapter {
#define IGB_FLAG_MAS_CAPABLE (1 << 11)
#define IGB_FLAG_MAS_ENABLE (1 << 12)
#define IGB_FLAG_HAS_MSIX (1 << 13)
+#define IGB_FLAG_EEE (1 << 14)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -489,7 +492,8 @@ struct igb_adapter {
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
- __IGB_DOWN
+ __IGB_DOWN,
+ __IGB_PTP_TX_IN_PROGRESS,
};
enum igb_boards {
@@ -525,9 +529,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
-void igb_ptp_tx_work(struct work_struct *work);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
@@ -545,8 +547,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd);
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1df02378de69..e5570acbeea8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
data[i+2] += restart2;
i += IGB_TX_QUEUE_STATS_LEN;
@@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
@@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev,
{
struct igb_adapter *adapter = netdev_priv(dev);
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
@@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ipcnfg, eeer, ret_val;
+ u32 ret_val;
u16 phy_data;
if ((hw->mac.type < e1000_i350) ||
@@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->supported = (SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full);
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
- ipcnfg = rd32(E1000_IPCNFG);
- eeer = rd32(E1000_EEER);
+ eeer = rd32(E1000_EEER);
- /* EEE status on negotiated link */
- if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
- edata->advertised = ADVERTISED_1000baseT_Full;
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
- if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
- edata->advertised |= ADVERTISED_100baseT_Full;
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
/* EEE Link Partner Advertised */
switch (hw->mac.type) {
@@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -ENODATA;
edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
break;
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
break;
}
- if (eeer & E1000_EEER_EEE_NEG)
- edata->eee_active = true;
-
edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
- if (eeer & E1000_EEER_TX_LPI_EN)
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
edata->tx_lpi_enabled = true;
/* Report correct negotiated EEE status for devices that
@@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (eee_curr.advertised != edata->advertised) {
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
dev_err(&adapter->pdev->dev,
- "Setting EEE Advertisement is not supported\n");
+ "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
return -EINVAL;
}
@@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
- igb_set_eee_i350(hw);
+ adapter->flags |= IGB_FLAG_EEE;
+ if (hw->mac.type == e1000_i350)
+ igb_set_eee_i350(hw);
+ else
+ igb_set_eee_i354(hw);
/* reset link */
if (netif_running(netdev))
@@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS)
+ if (status != E1000_SUCCESS) {
/* Error occurred while reading module */
+ kfree(dataword);
return -EIO;
+ }
be16_to_cpus(&dataword[i]);
}
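A minimal sketch (not part of this patch) of the reader side of the u64_stats _bh→_irq conversion above; ring and field names are taken from igb and the function name is hypothetical.

#include "igb.h"	/* driver-private header: struct igb_ring, rx_syncp, rx_stats */

/* Hypothetical helper: retry until a consistent 64-bit snapshot is read.
 * The _irq variants guard against stats writers that can run in hard-irq
 * context, which the old _bh variants did not.
 */
static void example_read_rx_stats(struct igb_ring *ring, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
		*packets = ring->rx_stats.packets;
		*bytes = ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
}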
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index e0af5bc61613..8333f67acf96 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a49f5ea..30198185d19a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
+ "Copyright (c) 2007-2014 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (E1000_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+ /* reads should not return all F's */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igb->netdev;
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* Coming from igb_set_interrupt_capability, the vectors are not yet
+ * allocated, so q_vector is NULL and we should stop here.
+ */
+ if (!q_vector)
+ return;
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
@@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
return;
igb_reset_interrupt_capability(adapter);
/* If we can't do MSI-X, try MSI */
msi_only:
+ adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
/* disable SR-IOV for non MSI-X configurations */
if (adapter->vf_data) {
@@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter)
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
return 0;
}
@@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter)
}
}
#endif
+ /* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ igb_set_eee_i350(hw);
+ break;
+ case e1000_i354:
+ igb_set_eee_i354(hw);
+ break;
+ default:
+ break;
+ }
+ }
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- igb_set_eee_i350(hw);
- break;
- case e1000_i354:
- if (hw->phy.media_type == e1000_media_type_copper) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = igb_set_eee_i350(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ break;
+ case e1000_i354:
if ((rd32(E1000_CTRL_EXT) &
- E1000_CTRL_EXT_LINK_MODE_SGMII))
- igb_set_eee_i354(hw);
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = igb_set_eee_i354(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
}
-
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -2591,7 +2652,7 @@ err_eeprom:
iounmap(hw->flash_address);
err_sw_init:
igb_clear_interrupt_scheme(adapter);
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2758,7 +2819,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_disable_sriov(pdev);
#endif
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
@@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
vmolr = rd32(E1000_VMOLR(vfn));
vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (hw->mac.type == e1000_i350) {
+ u32 dvmolr;
+
+ dvmolr = rd32(E1000_DVMOLR(vfn));
+ dvmolr |= E1000_DVMOLR_STRVLAN;
+ wr32(E1000_DVMOLR(vfn), dvmolr);
+ }
if (aupe)
vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
@@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work)
(ctrl & E1000_CTRL_RFCE) ? "RX" :
(ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
+ /* disable EEE if enabled */
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ dev_info(&adapter->pdev->dev,
+ "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+ adapter->hw.dev_spec._82575.eee_disable = true;
+ adapter->flags &= ~IGB_FLAG_EEE;
+ }
+
/* check if SmartSpeed worked */
igb_check_downshift(hw);
if (phy->speed_downgraded)
@@ -4306,8 +4383,7 @@ enum latency_range {
* were determined based on theoretical maximum wire speed and testing
* data, in order to minimize response time while increasing bulk
* throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: This function is called only when operating in a multiqueue
* receive environment.
**/
@@ -4381,8 +4457,7 @@ clear_counts:
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
**/
@@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -4905,12 +4980,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- if (!(adapter->ptp_tx_skb)) {
+ if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -4921,6 +4995,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
+ skb_tx_timestamp(skb);
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -5127,10 +5203,10 @@ void igb_update_stats(struct igb_adapter *adapter,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -5143,10 +5219,10 @@ void igb_update_stats(struct igb_adapter *adapter,
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -6620,7 +6696,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
/**
@@ -6690,7 +6768,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -6699,7 +6777,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -6713,7 +6791,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -6903,7 +6981,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
- do {
+ while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
@@ -6955,7 +7033,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* update budget accounting */
total_packets++;
- } while (likely(total_packets < budget));
+ }
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
@@ -7114,8 +7192,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+ case SIOCGHWTSTAMP:
+ return igb_ptp_get_ts_config(netdev, ifr);
case SIOCSHWTSTAMP:
- return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
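With SIOCGHWTSTAMP now routed to igb_ptp_get_ts_config() above, user space can read back the current timestamping setup without altering it. A minimal sketch (not part of this patch); the interface name is a placeholder.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)	/* read-only query of the shadow config */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}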
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3dc535d..2cca8fd5e574 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -75,6 +74,8 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+
/* SYSTIM read access for the 82576 */
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
**/
-void igb_ptp_tx_work(struct work_struct *work)
+static void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
ptp_tx_work);
@@ -386,6 +387,7 @@ void igb_ptp_tx_work(struct work_struct *work)
IGB_PTP_TX_TIMEOUT)) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
return;
@@ -466,7 +468,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
**/
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
@@ -479,6 +481,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -540,10 +543,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
}
/**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * igb_ptp_get_ts_config - get hardware time stamping config
+ * @netdev: pointer to the net device structure
+ * @ifreq: interface request containing the user hwtstamp_config
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+/**
+ * igb_ptp_set_ts_config - control hardware time stamping
 * @netdev: pointer to the net device structure
 * @ifreq: interface request containing the user hwtstamp_config
- * @cmd:
*
* Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
@@ -557,12 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
**/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
+ struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -570,14 +588,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
return -EFAULT;
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -586,7 +604,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
@@ -610,7 +628,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
@@ -621,12 +639,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
*/
if (hw->mac.type != e1000_82576) {
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
/* fall through */
default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -643,7 +661,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
is_l2 = true;
is_l4 = true;
@@ -707,7 +725,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
@@ -798,7 +816,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
/* Initialize the time sync interrupts for devices that support it. */
if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
}
@@ -841,6 +859,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
@@ -864,6 +883,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_PTP))
return;
+ /* reset the tstamp_config */
+ memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
/* Dial the nominal frequency. */
@@ -876,7 +898,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
/* Enable the timer functions and interrupts. */
wr32(E1000_TSAUXC, 0x0);
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
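A minimal sketch (not part of this patch) of the single-slot Tx timestamp reservation introduced above: the atomic bit lock replaces the racy "is ptp_tx_skb NULL?" test, and the completion/timeout paths drop the bit with clear_bit_unlock(). Driver types come from igb; the function name is hypothetical.

#include "igb.h"	/* driver-private header: struct igb_adapter, __IGB_PTP_TX_IN_PROGRESS */

/* Hypothetical helper: claim the one in-flight Tx timestamp slot. */
static bool example_claim_tx_timestamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	if (test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state))
		return false;			/* a request is already pending */

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	adapter->ptp_tx_skb = skb_get(skb);	/* hold a reference until the timestamp lands */
	adapter->ptp_tx_start = jiffies;
	return true;
}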
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435fc2e53..b7ab03a2f28f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries, 3, 3);
}
- if (err) {
+ if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
"Failed to initialize MSI-X interrupts.\n");
@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
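A minimal sketch (not part of this patch) of the pci_enable_msix() → pci_enable_msix_range() conversion used above; it assumes the caller has already filled in the msix_entry array, and the function name is hypothetical.

#include <linux/pci.h>

/* Hypothetical helper: returns the number of vectors granted (>= minvec),
 * or a negative errno, unlike the old tri-state pci_enable_msix() return.
 */
static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			       int minvec, int maxvec)
{
	int nvec;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or legacy interrupts */

	return nvec;		/* minvec..maxvec vectors are now enabled */
}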
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 57e390cbe6d0..f42c201f727f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int tso;
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
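A minimal sketch (not part of this patch) of why the drop paths above switch to dev_kfree_skb_any(): ndo_start_xmit() can be reached from atomic or hard-irq context (netpoll, for instance), where plain dev_kfree_skb() is not safe. The function name is hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: drop an skb from any context; dev_kfree_skb_any()
 * picks the irq-safe free path when interrupts are disabled.
 */
static netdev_tx_t example_drop_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}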
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0186ea2969fe..55c53a1cbb62 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -765,6 +766,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -806,10 +808,12 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_DISABLED,
__IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
+ __IXGBE_PTP_TX_IN_PROGRESS,
};
struct ixgbe_cb {
@@ -884,7 +888,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +961,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
- int cmd);
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fee4f35..4c78ea8946c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
**/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
u16 pcie_devctl2;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
/* only take action if timeout value is defaulted to 0 */
if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
- pci_read_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
- pci_write_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+ ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
- hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
-
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
@@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers.
- */
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
- int strategy)
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
{
u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
@@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index edda6814108c..f32b3dd1ba8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
@@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
* and MNG not enabled
*/
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- !hw->mng_fw_enabled) {
+ !ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u16 list_offset, data_offset, data_value;
- bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Need SW/FW semaphore around AUTOC writes if LESM on,
- * likewise reset_pipeline requires lock as it also writes
- * AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto setup_sfp_out;
-
- got_lock = true;
- }
-
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
- IXGBE_AUTOC_LMS_10G_SERIAL));
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- ret_val = ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = false;
- }
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return whether we took the lock for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599(). Note that locked can only be true in cases
+ * where this function doesn't return an error.
+ **/
+static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_82599().
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ **/
+static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = 0;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
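
A minimal caller-side sketch of how these two hooks are meant to be paired (the same read-modify-write pattern the later ixgbe_common.c hunks use); the helper name is hypothetical, everything else follows the ops introduced above:

static s32 example_autoc_rmw(struct ixgbe_hw *hw, u32 set_bits)
{
	bool locked = false;
	u32 autoc;
	s32 ret;

	/* read AUTOC; on 82599 with LESM enabled this also takes the
	 * SW/FW semaphore and reports that through 'locked' */
	ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
	if (ret)
		return ret;

	autoc |= set_bits;

	/* the write reuses (and then frees) the lock taken by the read;
	 * on 82599 it also performs the pipeline reset */
	return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
}
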
+
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -456,12 +514,20 @@ out:
*
* Disables link, should be called during D3 power down sequence.
*
- */
+ **/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg;
+ u32 autoc2_reg, fwsm;
+ u16 ee_ctrl_2 = 0;
+
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ /* Check to see if MNG FW could be enabled */
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+ if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+ !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
**/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- * @hw: pointer to hardware structure
- * @speed: link speed to set
- *
- * We set the module speed differently for fixed fiber. For other
- * multi-speed devices we don't have an error value so here if we
- * detect an error we just log it and exit.
- */
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed)
-{
- s32 status;
- u8 rs, eeprom_data;
-
- switch (speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- /* one bit mask same as setting on */
- rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
- break;
- default:
- hw_dbg(hw, "Invalid fixed module speed\n");
- return;
- }
-
- /* Set RS0 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
- goto out;
- }
-
- /* Set RS1 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
- goto out;
- }
-out:
- return;
-}
-
-/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- ixgbe_set_fiber_fixed_speed(hw,
- IXGBE_LINK_SPEED_1GB_FULL);
- break;
case ixgbe_media_type_fiber:
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
out:
if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- hw_dbg(hw, "Smartspeed has downgraded the link speed from "
- "the maximum advertised\n");
+ hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
return status;
}
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
- u32 autoc, pma_pmd_1g, link_mode, start_autoc;
+ u32 pma_pmd_1g, link_mode, links_reg, i;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 orig_autoc = 0;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
- u32 links_reg;
- u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- bool got_lock = false;
- bool autoneg = false;
+
+ /* holds the current value of the AUTOC register */
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ /* holds the cached value of AUTOC register */
+ u32 orig_autoc = 0;
+ /* temporary variable used for comparison purposes */
+ u32 autoc = current_autoc;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
+ orig_autoc = hw->mac.orig_autoc;
else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ orig_autoc = autoc;
- orig_autoc = autoc;
- start_autoc = hw->mac.cached_autoc;
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
}
- if (autoc != start_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is on,
- * likewise reset_pipeline requires us to hold this lock as
- * it also writes to AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != 0)
- goto out;
-
- got_lock = true;
- }
-
+ if (autoc != current_autoc) {
/* Restart link */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- hw->mac.cached_autoc = autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+ if (status)
+ goto out;
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autoneg did not "
- "complete.\n");
+ hw_dbg(hw, "Autoneg did not complete.\n");
}
}
}
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc2;
+ u32 ctrl, i, autoc, autoc2;
u32 curr_lms;
bool link_up = false;
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset(hw);
/* remember AUTOC from before we reset */
- if (hw->mac.cached_autoc)
- curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
- else
- curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
- IXGBE_AUTOC_LMS_MASK;
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
mac_reset_top:
/*
@@ -1205,7 +1184,7 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
}
if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = hw->mac.cached_autoc;
+ hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
@@ -1227,34 +1206,18 @@ mac_reset_top:
* Likewise if we support WoL we don't want change the
* LMS state either.
*/
- if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
hw->wol_enabled)
hw->mac.orig_autoc =
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
- if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is
- * on, likewise reset_pipeline requires us to hold
- * this lock as it also writes to AUTOC.
- */
- bool got_lock = false;
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status)
- goto reset_hw_out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- hw->mac.cached_autoc = hw->mac.orig_autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+ if (status)
+ goto reset_hw_out;
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0;
+ u32 bucket_hash = 0, hi_dword = 0;
+ int i;
/* Apply masks to input data */
- input->dword_stream[0] &= input_mask->dword_stream[0];
- input->dword_stream[1] &= input_mask->dword_stream[1];
- input->dword_stream[2] &= input_mask->dword_stream[2];
- input->dword_stream[3] &= input_mask->dword_stream[3];
- input->dword_stream[4] &= input_mask->dword_stream[4];
- input->dword_stream[5] &= input_mask->dword_stream[5];
- input->dword_stream[6] &= input_mask->dword_stream[6];
- input->dword_stream[7] &= input_mask->dword_stream[7];
- input->dword_stream[8] &= input_mask->dword_stream[8];
- input->dword_stream[9] &= input_mask->dword_stream[9];
- input->dword_stream[10] &= input_mask->dword_stream[10];
+ for (i = 0; i <= 10; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = ntohl(input->dword_stream[0]);
/* generate common hash dword */
- hi_hash_dword = ntohl(input->dword_stream[1] ^
- input->dword_stream[2] ^
- input->dword_stream[3] ^
- input->dword_stream[4] ^
- input->dword_stream[5] ^
- input->dword_stream[6] ^
- input->dword_stream[7] ^
- input->dword_stream[8] ^
- input->dword_stream[9] ^
- input->dword_stream[10]);
+ for (i = 1; i <= 10; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = ntohl(hi_dword);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* Process remaining 30 bit of the key */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
/*
* Limit hash to 13 bits since max bucket count is 8K.
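
The loops above replace long runs of open-coded statements; as a standalone illustration of the fold they perform (assumed layout: an 11-entry dword stream, as in the hunk), with the hypothetical helper name example_fold_dwords:

#include <stdint.h>

static void example_fold_dwords(uint32_t stream[11], const uint32_t mask[11],
				uint32_t *hi_hash_dword, uint32_t *lo_hash_dword)
{
	uint32_t acc = 0;
	int i;

	for (i = 0; i <= 10; i++)
		stream[i] &= mask[i];		/* apply per-dword masks */

	for (i = 1; i <= 10; i++)
		acc ^= stream[i];		/* fold dwords 1..10 together */

	*hi_hash_dword = acc;			/* the driver byte-swaps via ntohl() */
	/* low dword is the word-swapped version of the common dword */
	*lo_hash_dword = (*hi_hash_dword >> 16) | (*hi_hash_dword << 16);
}
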
@@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
- hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
if (ret_val == 0)
ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2194,7 @@ fw_version_err:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 anlp1_reg = 0;
@@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
- autoc_reg = hw->mac.cached_autoc;
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
@@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
- .mng_fw_enabled = &ixgbe_mng_enabled,
+ .prot_autoc_read = &prot_autoc_read_82599,
+ .prot_autoc_write = &prot_autoc_write_82599,
};
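
Taken together with the 82598 hunk earlier, the per-MAC dispatch now looks roughly like this (names copied from the two mac_ops hunks; a conceptual sketch only, shown to make the wiring explicit):

static struct ixgbe_mac_operations example_ops_82598 = {
	/* no AUTOC protection needed: plain register access */
	.prot_autoc_read	= &prot_autoc_read_generic,
	.prot_autoc_write	= &prot_autoc_write_generic,
};

static struct ixgbe_mac_operations example_ops_82599 = {
	/* may take the SW/FW semaphore and reset the pipeline */
	.prot_autoc_read	= &prot_autoc_read_82599,
	.prot_autoc_write	= &prot_autoc_write_82599,
};
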
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b617b1..24fba39e194e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
bool link_up;
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
- bool got_lock = false;
+ bool locked = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
+ /* some MAC's need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val)
+ goto out;
+
+ /* only backplane uses autoc, so fall through */
+ case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
break;
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- ixgbe_device_supports_autoneg_fc(hw)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
**/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
- struct ixgbe_mac_info *mac = &hw->mac;
u16 link_status;
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
- &link_status);
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
- mac->ops.set_lan_id(hw);
+ hw->mac.ops.set_lan_id(hw);
return 0;
}
@@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2437,6 +2423,53 @@ out:
}
/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in the PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 (80 millisec).
+ **/
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ s16 devctl2;
+ u32 pollcnt;
+
+ devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
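
The conversion from a timeout range to a poll budget is simple arithmetic; a standalone sketch of just the 10% margin step, using values taken from the table above (helper and program are illustrative only):

#include <stdio.h>

/* add the 10% margin the driver applies on top of the spec maximum,
 * expressed in units of 100 microseconds */
static unsigned int with_margin(unsigned int pollcnt)
{
	return (pollcnt * 11) / 10;
}

int main(void)
{
	printf("130 ms max  -> %u polls\n", with_margin(1300));	/* 1430 */
	printf("80 ms floor -> %u polls\n", with_margin(800));	/* 880  */
	return 0;
}
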
+
+/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*
@@ -2447,16 +2480,16 @@ out:
**/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
s32 status = 0;
- u32 i;
+ u32 i, poll;
u16 value;
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ ixgbe_removed(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
udelay(100);
- pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
- &value);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ goto out;
if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -2564,6 +2599,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @reg_val: Value we read from AUTOC
+ * @locked: bool to indicate whether the SW/FW lock should be taken. Never
+ * true in the generic case.
+ *
+ * The default case requires no protection, so just do the register read.
+ **/
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ **/
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return 0;
+}
+
+/**
* ixgbe_disable_rx_buff_generic - Stops the receive data path
* @hw: pointer to hardware structure
*
@@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
+ bool locked = false;
/*
* Link must be up to auto-blink the LEDs;
@@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- bool got_lock = false;
-
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
- got_lock = true;
- }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
+
IXGBE_WRITE_FLUSH(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2690,33 +2746,21 @@ out:
**/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
- bool got_lock = false;
+ bool locked = false;
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2861,6 @@ san_mac_addr_clr:
**/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u16 msix_count = 1;
u16 max_msix_count;
u16 pcie_offset;
@@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
return msix_count;
}
- pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
+ msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
+ if (ixgbe_removed(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
@@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ if (ixgbe_removed(hw->hw_addr))
+ goto done;
+
if (!mpsar_lo && !mpsar_hi)
goto done;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f2e3919750ec..f12c40fb5537 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_FAILED_READ_REG 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
static inline bool ixgbe_removed(void __iomem *addr)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 05e23b80b5e3..bdb99b3b0f30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d71d9ce3e394..d5a1e3db0774 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index c5933f6dceee..472b0f450bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 043307024c4a..6c55c14d082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
@@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
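
The _irq variants used above follow the usual seqcount reader pattern; a minimal sketch of the reader side, assuming the same ring/stats fields as in the hunk (helper name hypothetical):

static void example_read_ring_stats(struct ixgbe_ring *ring,
				    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
		/* retry if a writer updated the counters meanwhile */
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
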
@@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
+
+ if (ixgbe_removed(hw->hw_addr)) {
+ *data = 1;
+ return 1;
+ }
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IXGBE_TESTING,
&adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 08726177a3eb..25a3dfef33e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
/* return 0 to bypass going to ULD for DDPed data */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
rc = 0;
break;
/* unmap the sg list when FCPRSP is received */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
ddp->sgc = 0;
/* fall through */
/* if DDP length is present pass it through to ULD */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
if (ddp->len)
rc = ddp->len;
break;
/* no match will return as an error */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 3a02759b5e95..b16cc786750d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaaa160a..2067d392cc3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
- int err, vector_threshold;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold) {
+ if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
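
For context, the contract of pci_enable_msix_range(), which replaces the hand-rolled retry loop: one call asks for between a minimum and a desired vector count. A sketch of how a caller interprets the result (helper name hypothetical):

static int example_acquire_vectors(struct pci_dev *pdev,
				   struct msix_entry *entries,
				   int min_vecs, int want_vecs)
{
	int got = pci_enable_msix_range(pdev, entries, min_vecs, want_vecs);

	/* negative: not even min_vecs could be allocated, so the caller
	 * falls back to a single MSI vector or legacy interrupts;
	 * otherwise 'got' is the number actually enabled, somewhere in
	 * [min_vecs, want_vecs] */
	return got;
}
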
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18076c4178b4..8436c651b735 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2013 Intel Corporation.";
+ "Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return -1;
pcie_capability_read_word(parent_dev, reg, value);
+ if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+ return -1;
return 0;
}
@@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+ u16 value;
+
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD) {
+ ixgbe_remove_adapter(hw);
+ return true;
+ }
+ return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ pci_read_config_word(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ pci_read_config_dword(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+ pci_write_config_word(adapter->pdev, reg, value);
+}
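
The removal handling added here boils down to "an all-ones config read means the device is gone"; a minimal sketch of that test (constant value taken from the ixgbe_common.h hunk above, helper name hypothetical):

static bool example_device_gone(struct pci_dev *pdev)
{
	u16 vendor;

	/* a surprise-removed device reads back all ones */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	return vendor == IXGBE_FAILED_READ_CFG_WORD;	/* 0xffff */
}
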
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
#ifdef IXGBE_FCOE
@@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
nexthdr = hdr.ipv6->nexthdr;
hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
- } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+ } else if (protocol == htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
hlen = FCOE_HEADER_LEN;
@@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- do {
+ while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* update budget accounting */
total_rx_packets++;
- } while (likely(total_rx_packets < budget));
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC Err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int reinit_count = 0;
@@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
@@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *upper;
- struct list_head *iter;
int err;
u32 ctrl_ext;
@@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_crit(drv, "Fan has stopped, replace the adapter\n");
}
- /* enable transmits */
- netif_tx_start_all_queues(adapter->netdev);
-
- /* enable any upper devices */
- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
- if (netif_is_macvlan(upper)) {
- struct macvlan_dev *vlan = netdev_priv(upper);
-
- if (vlan->fwd_priv)
- netif_tx_start_all_queues(upper);
- }
- }
-
/* bring the link up in the watchdog, this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
struct net_device *netdev = adapter->netdev;
u32 err;
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
@@ -5515,6 +5566,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
pci_wake_from_d3(pdev, false);
@@ -5612,7 +5665,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_release_hw_control(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -6016,6 +6070,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 link_speed = adapter->link_speed;
bool flow_rx, flow_tx;
@@ -6067,6 +6123,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* enable transmits */
+ netif_tx_wake_all_queues(adapter->netdev);
+
+ /* enable any upper devices */
+ rtnl_lock();
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_wake_all_queues(upper);
+ }
+ }
+ rtnl_unlock();
+
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -6454,7 +6525,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -6514,12 +6585,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -6794,9 +6865,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_network_header(first->skb);
/* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+ if ((first->protocol != htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != __constant_htons(ETH_P_IP) ||
+ (first->protocol != htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP))
return;
@@ -6829,12 +6900,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
* and write the value to source port portion of compressed dword
*/
if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
- common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else {
@@ -6900,8 +6971,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
* or FIP and we have FCoE enabled on the adapter
*/
switch (vlan_get_protocol(skb)) {
- case __constant_htons(ETH_P_FCOE):
- case __constant_htons(ETH_P_FIP):
+ case htons(ETH_P_FCOE):
+ case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -6962,7 +7033,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -6974,9 +7045,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
- skb_tx_timestamp(skb);
-
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -6986,6 +7057,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
schedule_work(&adapter->ptp_tx_work);
}
+ skb_tx_timestamp(skb);
+
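
The timestamping change relies on test_and_set_bit_lock() to admit only one outstanding Tx timestamp at a time; a sketch of the guard, assuming the bit is later released with clear_bit_unlock() once the timestamp is retrieved (that side is not part of this hunk):

static bool example_try_start_tx_tstamp(struct ixgbe_adapter *adapter)
{
	/* returns true only for the first caller; later requests are
	 * refused until the bit is released again */
	return !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
				      &adapter->state);
}
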
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -7021,7 +7094,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
- if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ if ((protocol == htons(ETH_P_FCOE)) &&
(tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
@@ -7143,7 +7216,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+ return ixgbe_ptp_set_ts_config(adapter, req);
+ case SIOCGHWTSTAMP:
+ return ixgbe_ptp_get_ts_config(adapter, req);
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -7234,10 +7309,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
@@ -7250,10 +7325,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -7792,6 +7867,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices could support WOL */
switch (subdevice_id) {
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
/* only support first port */
if (hw->bus.func != 0)
@@ -7969,10 +8045,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Cache if MNG FW is up so we don't have to read the REG later */
- if (hw->mac.ops.mng_fw_enabled)
- hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -8223,7 +8295,7 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
/* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+ if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -8244,7 +8316,8 @@ err_alloc_etherdev:
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -8313,7 +8386,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -8331,6 +8405,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = adapter->netdev;
#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *bdev, *vfdev;
u32 dw0, dw1, dw2, dw3;
int vf, pos;
@@ -8351,10 +8426,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (!pos)
goto skip_bad_vf_detection;
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+ dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+ dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+ dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+ dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+ if (ixgbe_removed(hw->hw_addr))
+ goto skip_bad_vf_detection;
req_id = dw1 >> 16;
/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8417,14 +8494,20 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbe_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -8446,6 +8529,9 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
e_err(probe, "Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
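Illustrative note, not part of the patch: several hunks above wrap pci_disable_device() in a test_and_set_bit() check on the new __IXGBE_DISABLED state bit, so the error-detected, remove and probe-failure paths cannot disable the device twice. A kernel-style sketch of the guard (the state word is assumed to be the adapter->state bitmap used above):

static void example_disable_once(struct pci_dev *pdev, unsigned long *state)
{
        /* test_and_set_bit() returns the previous value, so only the
         * first caller sees 0 and actually disables the device
         */
        if (!test_and_set_bit(__IXGBE_DISABLED, state))
                pci_disable_device(pdev);
}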
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc3101afd29f..f5c6af2b891b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e44ff47659b5..a9b9ad69ed0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 132557c318f8..23f765263f12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -98,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ hw_dbg(hw, "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ixgbe_get_phy_id - Get the phy type
* @hw: pointer to hardware structure
*
@@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
goto out;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 i;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
/* reset the PHY and poll for completion */
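Illustrative note, not part of the patch: the new ixgbe_check_reset_blocked() helper reads IXGBE_MMNGC and reports whether manageability firmware has vetoed link changes; every PHY reset and autonegotiation path touched above consults it first. A kernel-style sketch of a caller (the reset body is elided):

static s32 example_reset_phy(struct ixgbe_hw *hw)
{
        /* blocked by MNG FW, so leave the PHY alone */
        if (ixgbe_check_reset_blocked(hw))
                return 0;

        /* ... perform the soft PHY reset here ... */
        return 0;
}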
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index fffcbdd2bf0e..0bb047f751c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -65,9 +66,6 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -79,7 +77,6 @@
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a1a7d8..63515a6f67fa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -492,6 +493,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -511,13 +513,10 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
IXGBE_PTP_TX_TIMEOUT);
u32 tsynctxctl;
- /* we have to have a valid skb */
- if (!adapter->ptp_tx_skb)
- return;
-
if (timeout) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
e_warn(drv, "clearing Tx Timestamp hang");
return;
}
@@ -576,14 +575,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
/**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * ixgbe_ptp_set_ts_config - control hardware time stamping
* @adapter: pointer to adapter struct
* @ifreq: ioctl data
- * @cmd: particular ioctl requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
@@ -599,8 +605,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
*/
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd)
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hwtstamp_config config;
@@ -702,6 +707,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -809,6 +818,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
+ /* Reset the saved tstamp_config */
+ memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
ixgbe_ptp_start_cyclecounter(adapter);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +852,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +868,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -911,6 +927,7 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
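Illustrative note, not part of the patch: caching the last applied hwtstamp_config in adapter->tstamp_config is what makes the new SIOCGHWTSTAMP path trivial; ixgbe_ptp_get_ts_config() only copies the saved structure back to user space, and ixgbe_ptp_reset() clears it so a reset reports defaults again. A sketch of the get path, mirroring the function added above:

static int example_get_ts_config(struct hwtstamp_config *cached,
                                 struct ifreq *ifr)
{
        /* return the settings applied by the last SIOCSHWTSTAMP */
        return copy_to_user(ifr->ifr_data, cached, sizeof(*cached)) ?
               -EFAULT : 0;
}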
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dff0977876f7..e6c68d396c99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 8bd29190514e..139eaddfb2ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index e74ae3682733..ef6df3d6437e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0d39cfc4a3bf..8a6ff2423f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -54,6 +55,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
@@ -1609,6 +1611,9 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1788,6 +1793,9 @@ enum {
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */
+
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
@@ -1853,8 +1861,19 @@ enum {
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
- ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
@@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
- bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2957,7 +2976,6 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
- u32 cached_autoc;
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
@@ -3033,7 +3051,6 @@ struct ixgbe_hw {
bool adapter_stopped;
bool force_full_reset;
bool allow_unsupported_sfp;
- bool mng_fw_enabled;
bool wol_enabled;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 24b80a6cfca4..188a5974b85c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
goto out;
ret_val = ixgbe_start_hw_gen2(hw);
- hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
out:
return ret_val;
}
@@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f68b78c732a8..1baecb60f065 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((register_test_patterns[pat] & W), \
- (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (register_test_patterns[pat] & W & M)) { \
- hw_dbg(&adapter->hw, \
- "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (register_test_patterns[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
+static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 pat, val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg,
+ register_test_patterns[pat] & write);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if (val != (register_test_patterns[pat] & write & mask)) {
+ hw_dbg(&adapter->hw,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+ reg, val,
+ register_test_patterns[pat] & write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ }
+ return false;
}
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- pr_err("set/check reg %04X test failed: got 0x%08X expected " \
- "0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
+static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
@@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
const struct ixgbevf_reg_test *test;
u32 i;
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
test = reg_test_vf;
/*
@@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + 4 + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
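Illustrative note, not part of the patch: replacing the REG_PATTERN_TEST/REG_SET_AND_CHECK macros with bool-returning helpers removes the return statements hidden inside macro bodies; each switch case above assigns the result to a local and a single check propagates the failure. A kernel-style sketch of the caller shape, using the reg_pattern_test() helper introduced above:

static int example_run_pattern_tests(struct ixgbevf_adapter *adapter,
                                     const struct ixgbevf_reg_test *test,
                                     u64 *data)
{
        u32 i;

        for (i = 0; i < test->array_len; i++) {
                /* helper records the failing register in *data and
                 * returns true, so the caller just propagates it
                 */
                if (reg_pattern_test(adapter, data,
                                     test->reg + (i * 0x40),
                                     test->mask, test->write))
                        return 1;
        }
        return 0;
}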
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 54829326bb09..e7e7d695816b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i) \
@@ -401,6 +406,7 @@ struct ixgbevf_adapter {
u64 bp_tx_missed;
#endif
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -412,7 +418,9 @@ struct ixgbevf_adapter {
enum ixbgevf_state_t {
__IXGBEVF_TESTING,
__IXGBEVF_RESETTING,
- __IXGBEVF_DOWN
+ __IXGBEVF_DOWN,
+ __IXGBEVF_DISABLED,
+ __IXGBEVF_REMOVING,
};
struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df28985eba7..4ba139b2d25a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ dev_err(&adapter->pdev->dev, "Adapter removed\n");
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* The following check not only optimizes a bit by not
+ * performing a read on the status register when the
+ * register just read was a status register read that
+ * returned IXGBE_FAILED_READ_REG. It also blocks any
+ * potential recursion.
+ */
+ if (reg == IXGBE_VFSTATUS) {
+ ixgbevf_remove_adapter(hw);
+ return;
+ }
+ value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbevf_remove_adapter(hw);
+}
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (IXGBE_REMOVED(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbevf_check_remove(hw, reg);
+ return value;
+}
+
static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
@@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbevf_write_tail(rx_ring, val);
}
/**
@@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* Workaround hardware that can't do proper VEPA multicast
* source pruning.
*/
- if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+ if ((skb->pkt_type == PACKET_BROADCAST ||
+ skb->pkt_type == PACKET_MULTICAST) &&
ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
@@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
1 << q_vector->v_idx);
@@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
hw->mac.get_link_status = 1;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
@@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
do {
usleep_range(1000, 2000);
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
/* reset ntu and ntc to place SW in sync with hardware */
ring->next_to_clean = 0;
@@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ smp_mb__before_clear_bit();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBEVF_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++)
@@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
int vectors)
{
- int err = 0;
int vector_threshold;
/* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err || err < 0) /* Success or a nasty failure. */
- break;
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold)
- err = -ENOMEM;
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (err) {
+ if (vectors < 0) {
dev_err(&adapter->pdev->dev,
"Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return vectors;
}
- return err;
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+
+ return 0;
}
/**
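Illustrative note, not part of the patch: the hunk above converts the driver to pci_enable_msix_range(), which takes the minimum and maximum vector counts and returns either the number granted or a negative errno, so the driver no longer loops on partial allocations itself. A kernel-style sketch:

static int example_acquire_vectors(struct pci_dev *pdev,
                                   struct msix_entry *entries,
                                   int minvec, int maxvec)
{
        int vectors = pci_enable_msix_range(pdev, entries, minvec, maxvec);

        if (vectors < 0)
                return vectors;         /* could not get even minvec vectors */

        return vectors;                 /* number of vectors actually granted */
}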
@@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
@@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ if (IXGBE_REMOVED(hw->hw_addr)) {
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbevf_down(adapter);
+ rtnl_unlock();
+ }
+ return;
+ }
ixgbevf_queue_reset_subtask(adapter);
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
pf_has_reset:
/* Reset the timer */
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + (2 * HZ)));
@@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbevf_write_tail(tx_ring, i);
return;
dma_error:
@@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgbevf_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
- else
+ else if (!tso)
ixgbevf_tx_csum(tx_ring, first);
ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -3274,7 +3329,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
return retval;
#endif
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -3286,7 +3342,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* pci_restore_state clears dev->state_saved so call
@@ -3299,6 +3354,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
ixgbevf_reset(adapter);
@@ -3344,10 +3401,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
@@ -3355,10 +3412,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
@@ -3460,6 +3517,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -3545,14 +3603,15 @@ err_register:
ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -3570,7 +3629,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- set_bit(__IXGBEVF_DOWN, &adapter->state);
+ set_bit(__IXGBEVF_REMOVING, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -3583,14 +3642,15 @@ static void ixgbevf_remove(struct pci_dev *pdev)
ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_regions(pdev);
hw_dbg(&adapter->hw, "Remove complete\n");
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -3607,15 +3667,20 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbevf_down(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -3639,6 +3704,8 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
ixgbevf_reset(adapter);
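Illustrative note, not part of the patch: ixgbevf now funnels MMIO reads through ixgbevf_read_reg(), which treats an all-ones result as a possible surprise removal, confirms it against IXGBE_VFSTATUS, and clears hw->hw_addr so later accesses short-circuit; ring tails are programmed through the saved adapter->io_addr so iounmap() still works after hw_addr is cleared. A condensed restatement of the read wrapper added above:

u32 example_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))            /* already marked removed */
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);  /* may clear hw->hw_addr */
        return value;
}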
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index debd8c0e1f28..09dd8f698bea 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -70,16 +70,6 @@
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 7b1f502d1716..3061d1890471 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
const struct ixgbe_mac_operations *mac_ops;
};
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+
+#define IXGBE_REMOVED(a) unlikely(!(a))
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (IXGBE_REMOVED(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
+
+static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset, u32 value)
+{
+ ixgbe_write_reg(hw, reg + (offset << 2), value);
+}
+#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
+
+static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset)
+{
+ return ixgbevf_read_reg(hw, reg + (offset << 2));
+}
+#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
+
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f5685c0d0579..b0c6050479eb 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2054,19 +2054,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
}
static int
-jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
-{
- if (unlikely(skb_shinfo(skb)->gso_size &&
- skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
- dev_kfree_skb(skb);
- return -1;
- }
-
- return 0;
-}
-
-static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
@@ -2225,7 +2212,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
struct jme_adapter *jme = netdev_priv(netdev);
int idx;
- if (unlikely(jme_expand_header(jme, skb))) {
+ if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
+ dev_kfree_skb_any(skb);
++(NET_STAT(jme).tx_dropped);
return NETDEV_TX_OK;
}
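Illustrative note, not part of the patch: the removed jme_expand_header() open-coded what skb_cow_head() already provides, namely un-sharing the header portion of a GSO skb before the driver edits it, and the xmit path now frees with dev_kfree_skb_any(), which is safe in the atomic context start_xmit can run in. A kernel-style sketch of the check:

static int example_prep_gso(struct sk_buff *skb)
{
        if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
                dev_kfree_skb_any(skb); /* caller counts it as tx_dropped */
                return -ENOMEM;
        }
        return 0;
}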
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a2565ce22b7c..b7b8d74c22d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
unlikely(tag_bytes & ~12)) {
if (skb_checksum_help(skb) == 0)
goto no_csum;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 1;
}
@@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
netdev_err(dev, "tx queue full?!\n");
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index fd409d76b811..b161a525fc5b 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -167,11 +167,6 @@ out:
return ret;
}
-static int orion_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
struct orion_mdio_dev *dev = dev_id;
@@ -209,7 +204,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->name = "orion_mdio_bus";
bus->read = orion_mdio_read;
bus->write = orion_mdio_write;
- bus->reset = orion_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8d76fca7fde7..d04b1c3c9b85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -510,12 +510,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -2761,7 +2761,6 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
- int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2816,30 +2815,19 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto err_clk;
- }
-
pp->base = devm_ioremap_resource(&pdev->dev, res);
- if (pp->base == NULL) {
+ if (IS_ERR(pp->base)) {
err = PTR_ERR(pp->base);
goto err_clk;
}
/* Alloc per-cpu stats */
- pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
goto err_clk;
}
- for_each_possible_cpu(cpu) {
- struct mvneta_pcpu_stats *stats;
- stats = per_cpu_ptr(pp->stats, cpu);
- u64_stats_init(&stats->syncp);
- }
-
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 597846193869..7f81ae66cc89 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2845,7 +2845,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
pkts_compl++;
bytes_compl += e->skb->len;
- dev_kfree_skb(e->skb);
+ dev_consume_skb_any(e->skb);
}
}
netdev_completed_queue(dev, pkts_compl, bytes_compl);
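Illustrative note, not part of the patch: the skge hunks above, like the mv643xx_eth ones earlier and the sky2 ones that follow, apply the same rule: error paths reachable from ->ndo_start_xmit() free with dev_kfree_skb_any(), which is legal with interrupts disabled, while completed transmits use dev_consume_skb_any() so drop monitors do not count them as losses. A minimal sketch:

static netdev_tx_t example_xmit_error(struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);         /* safe in atomic/hard-irq context */
        return NETDEV_TX_OK;            /* skb was consumed, not requeued */
}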
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 55a37ae11440..b81106451a0a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -44,6 +44,8 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <asm/irq.h>
@@ -2000,7 +2002,7 @@ mapping_unwind:
mapping_error:
if (net_ratelimit())
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
unsigned int total_bytes[2] = { 0 };
unsigned int total_packets[2] = { 0 };
+ if (to_do <= 0)
+ return work_done;
+
rmb();
do {
struct sky2_port *sky2;
@@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
u64 _bytes, _packets;
do {
- start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
_bytes = sky2->rx_stats.bytes;
_packets = sky2->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
stats->rx_packets = _packets;
stats->rx_bytes = _bytes;
do {
- start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
_bytes = sky2->tx_stats.bytes;
_packets = sky2->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
stats->tx_packets = _packets;
stats->tx_bytes = _bytes;
@@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
{
struct sky2_port *sky2;
struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+ const void *iap;
if (!dev)
return NULL;
@@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->features |= dev->hw_features;
- /* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ /* try to get mac address in the following order:
+ * 1) from device tree data
+ * 2) from internal registers set by bootloader
+ */
+ iap = of_get_mac_address(hw->pdev->dev.of_node);
+ if (iap)
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+ else
+ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
+ ETH_ALEN);
return dev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 563495d8975a..1486ce902a56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
#
config MLX4_EN
- tristate "Mellanox Technologies 10Gbit Ethernet support"
+ tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI
select MLX4_CORE
select PTP_1588_CLOCK
@@ -23,6 +23,13 @@ config MLX4_EN_DCB
If unsure, set to Y
+config MLX4_EN_VXLAN
+ bool "VXLAN offloads Support"
+ default y
+ depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
+ ---help---
+ Say Y here if you want to use VXLAN offloads in the driver.
+
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0d02fba94536..78099eab7673 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,16 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
-static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd)
-{
- return -EPERM;
-}
-
-static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
+static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
@@ -964,6 +955,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = NULL
},
{
+ .opcode = MLX4_CMD_CONFIG_DEV,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CMD_EPERM_wrapper
+ },
+ {
.opcode = MLX4_CMD_ALLOC_RES,
.has_inbox = false,
.has_outbox = false,
@@ -1258,7 +1258,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = MLX4_CMD_UPDATE_QP_wrapper
+ .wrapper = mlx4_CMD_EPERM_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
@@ -1267,7 +1267,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+ .wrapper = mlx4_CMD_EPERM_wrapper,
},
{
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
@@ -1378,7 +1378,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+ .wrapper = mlx4_CMD_EPERM_wrapper
},
};
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
-
- for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
+ for (port = min_port; port <= max_port; port++) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vp_oper->state = *vp_admin;
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
{
int port;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
- for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ for (port = min_port; port <= max_port; port++) {
+ if (!test_bit(port - 1, actv_ports.ports))
+ continue;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
return vf+1;
}
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
+{
+ if (slave < 1 || slave > dev->num_vfs) {
+ mlx4_err(dev,
+ "Bad slave number:%d (number of activated slaves: %lu)\n",
+ slave, dev->num_slaves);
+ return -EINVAL;
+ }
+ return slave - 1;
+}
+
+struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_active_ports actv_ports;
+ int vf;
+
+ bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
+
+ if (slave == 0) {
+ bitmap_fill(actv_ports.ports, dev->caps.num_ports);
+ return actv_ports;
+ }
+
+ vf = mlx4_get_vf_indx(dev, slave);
+ if (vf < 0)
+ return actv_ports;
+
+ bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
+ min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
+ dev->caps.num_ports));
+
+ return actv_ports;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
+
+int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
+{
+ unsigned n;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+ if (port <= 0 || port > m)
+ return -EINVAL;
+
+ n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+ if (port <= n)
+ port = n + 1;
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
+
+int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ if (test_bit(port - 1, actv_ports.ports))
+ return port -
+ find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
+ int port)
+{
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+
+ bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+ if (port <= 0 || port > dev->caps.num_ports)
+ return slaves_pport;
+
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, i);
+ if (test_bit(port - 1, actv_ports.ports))
+ set_bit(i, slaves_pport.slaves);
+ }
+
+ return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+ struct mlx4_dev *dev,
+ const struct mlx4_active_ports *crit_ports)
+{
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+
+ bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, i);
+ if (bitmap_equal(crit_ports->ports, actv_ports.ports,
+ dev->caps.num_ports))
+ set_bit(i, slaves_pport.slaves);
+ }
+
+ return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
+ /* mlx4_get_slave_default_vlan -
+ * return true if VST (default vlan);
+ * if VST, will return vlan & qos (if not NULL)
+ */
+bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+ u16 *vlan, u8 *qos)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_priv *priv;
+
+ priv = mlx4_priv(dev);
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+
+ if (MLX4_VGT != vp_oper->state.default_vlan) {
+ if (vlan)
+ *vlan = vp_oper->state.default_vlan;
+ if (qos)
+ *qos = vp_oper->state.default_qos;
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
+
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index abaf6bb22416..57dda95b67d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = mlx4_en_phc_adjfreq,
.adjtime = mlx4_en_phc_adjtime,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b4881b686159..c95ca252187c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+ if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d357bf5a4686..0c59d4fe7e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
+MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
+ "Threshold for using inline data (range: 17-104, default: 104)");
+
+#define MAX_PFC_TX 0xff
+#define MAX_PFC_RX 0xff
+
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...)
{
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
+ params->prof[i].inline_thold = inline_thold;
}
return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool) {
- mdev->profile.prof[i].rx_ring_num =
- rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS)));
- } else {
- mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
- min_t(int, dev->caps.comp_pool/
- dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
- }
- }
+ /* Set default number of RX rings */
+ mlx4_en_set_num_rx_rings(mdev);
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
.protocol = MLX4_PROT_ETH,
};
+static void mlx4_en_verify_params(void)
+{
+ if (pfctx > MAX_PFC_TX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfctx, MAX_PFC_TX);
+ pfctx = 0;
+ }
+
+ if (pfcrx > MAX_PFC_RX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfcrx, MAX_PFC_RX);
+ pfcrx = 0;
+ }
+
+ if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
+ inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
+ inline_thold = MAX_INLINE;
+ }
+}
+
static int __init mlx4_en_init(void)
{
+ mlx4_en_verify_params();
+
return mlx4_register_interface(&mlx4_en_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 84a96f70dfb5..f085c2df5e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -39,6 +39,7 @@
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
+#include <net/vxlan.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -603,7 +604,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int err = 0;
u64 reg_id;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -672,7 +673,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +686,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +716,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+ u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -759,18 +760,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
int err = 0;
@@ -1089,7 +1078,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
+ mcast_addr = mlx4_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1181,7 +1170,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1224,7 +1213,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_en_mac_to_u64(ha->addr);
+ mac = mlx4_mac_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1677,7 +1666,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
@@ -1709,6 +1698,10 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+ vxlan_get_rx_port(dev);
+#endif
priv->port_up = true;
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2216,7 +2209,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+ u64 mac_u64 = mlx4_mac_to_u64(mac);
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -2276,6 +2269,83 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_MLX4_EN_VXLAN
+static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+{
+ int ret;
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_add_task);
+
+ ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
+ if (ret)
+ goto out;
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 1);
+out:
+ if (ret)
+ en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+}
+
+static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+{
+ int ret;
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ vxlan_del_task);
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 0);
+ if (ret)
+ en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+
+ priv->vxlan_port = 0;
+}
+
+static void mlx4_en_add_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 current_port;
+
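+ /* mlx4_config_vxlan_port() issues a sleeping firmware command, so the
+ * update is deferred to the driver workqueue instead of being done
+ * directly from this callback.
+ */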
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+ return;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ current_port = priv->vxlan_port;
+ if (current_port && current_port != port) {
+ en_warn(priv, "vxlan port %d configured, can't add port %d\n",
+ ntohs(current_port), ntohs(port));
+ return;
+ }
+
+ priv->vxlan_port = port;
+ queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
+}
+
+static void mlx4_en_del_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 current_port;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ current_port = priv->vxlan_port;
+ if (current_port != port) {
+ en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
+ return;
+ }
+
+ queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+}
+#endif
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2302,6 +2372,10 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
+#ifdef CONFIG_MLX4_EN_VXLAN
+ .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
+ .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+#endif
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2351,7 +2425,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
- dev->dev_id = port - 1;
+ dev->dev_port = port - 1;
/*
* Initialize driver private data
@@ -2393,6 +2467,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+#ifdef CONFIG_MLX4_EN_VXLAN
+ INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+ INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
+#endif
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2417,7 +2495,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mlx4_is_slave(priv->mdev->dev)) {
eth_hw_addr_random(dev);
en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+ mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
mdev->dev->caps.def_mac[priv->port] = mac_u64;
} else {
en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
@@ -2526,7 +2604,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+ err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index dae1a1f4ae55..c2cfb05e7290 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
+ priv->port_stats.queue_stopped = 0;
+ priv->port_stats.wake_queue = 0;
+
for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i]->packets;
stats->tx_bytes += priv->tx_ring[i]->bytes;
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+ priv->port_stats.queue_stopped +=
+ priv->tx_ring[i]->queue_stopped;
+ priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 890922c1c8ee..ba049ae88749 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
+{
+ int i;
+ int num_of_eqs;
+ int num_rx_rings;
+ struct mlx4_dev *dev = mdev->dev;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool)
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ dev->caps.num_comp_vectors,
+ DEF_RX_RINGS));
+ else
+ num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
+ dev->caps.comp_pool/
+ dev->caps.num_ports) - 1;
+
+ num_rx_rings = min_t(int, num_of_eqs,
+ netif_get_num_default_rss_queues());
+ mdev->profile.prof[i].rx_ring_num =
+ rounddown_pow_of_two(num_rx_rings);
+ }
+}
+
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node)
@@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (!priv->port_up)
return 0;
+ if (budget <= 0)
+ return polled;
+
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c11d063473e5..03e5f6ac67e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
- /* The device currently only supports 10G speed */
- if (priv->port_state.link_speed != SPEED_10000)
+ /* The device supports 1G, 10G and 40G speeds */
+ if (priv->port_state.link_speed != 1000 &&
+ priv->port_state.link_speed != 10000 &&
+ priv->port_state.link_speed != 40000)
return priv->port_state.link_speed;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 13457032d15f..dd1f6d346459 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
#include "mlx4_en.h"
-enum {
- MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
- MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
-
- inline_thold = min(inline_thold, MAX_INLINE);
+ ring->inline_thold = priv->prof->inline_thold;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
}
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
*/
if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
}
return done;
}
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
return ring->buf + index * TXBB_SIZE;
}
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
{
void *ptr;
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
}
} else {
*lso_header_size = 0;
- if (!is_inline(skb, NULL))
+ if (!is_inline(priv->prof->inline_thold, skb, NULL))
real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
else
real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
if (skb->len <= spc) {
- inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ if (likely(skb->len >= MIN_PKT_LEN)) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+ memset(((void *)(inl + 1)) + skb->len, 0,
+ MIN_PKT_LEN - skb->len);
+ }
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
- priv->port_stats.queue_stopped++;
+ ring->queue_stopped++;
/* If queue was emptied after the if, and before the
* stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(((int)(ring->prod - ring->cons)) <=
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
} else {
return NETDEV_TX_BUSY;
}
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->data_offset = (void *)data - (void *)tx_desc;
tx_info->linear = (lso_header_size < skb_headlen(skb) &&
- !is_inline(skb, NULL)) ? 1 : 0;
+ !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
- if (is_inline(skb, &fragptr)) {
+ if (is_inline(ring->inline_thold, skb, &fragptr)) {
tx_info->inl = 1;
} else {
/* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
- *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+ tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8992b38578d5..d501a2b0fb79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
int i;
enum slave_port_gen_event gen_event;
+ struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
+ port);
- for (i = 0; i < dev->num_slaves; i++)
- set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+ for (i = 0; i < dev->num_vfs + 1; i++)
+ if (test_bit(i, slaves_pport.slaves))
+ set_and_calc_slave_port_state(dev, i, port,
+ event, &gen_event);
}
/**************************************************************************
The function gets as input the new event for that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
struct mlx4_slave_state *ctx = NULL;
unsigned long flags;
int ret = -1;
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
enum slave_port_state cur_state =
mlx4_get_slave_port_state(dev, slave, port);
*gen_event = SLAVE_PORT_GEN_EVENT_NONE;
- if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+ port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be64_to_cpu(eqe->event.cmd.out_param));
break;
- case MLX4_EVENT_TYPE_PORT_CHANGE:
+ case MLX4_EVENT_TYPE_PORT_CHANGE: {
+ struct mlx4_slaves_pport slaves_port;
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+ slaves_port = mlx4_phys_to_slaves_pport(dev, port);
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
if (!mlx4_is_master(dev))
break;
- for (i = 0; i < dev->num_slaves; i++) {
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ if (!test_bit(i, slaves_port.slaves))
+ continue;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
" to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
- if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
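+ /* Rewrite bits 31:28 of the EQE so the slave sees
+ * its own (renumbered) port, not the physical one.
+ */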
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
+ }
} else { /* IB port */
set_and_calc_slave_port_state(dev, i, port,
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (!mlx4_is_master(dev))
break;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- for (i = 0; i < dev->num_slaves; i++) {
+ for (i = 0; i < dev->num_vfs + 1; i++) {
+ if (!test_bit(i, slaves_port.slaves))
+ continue;
if (i == mlx4_master_func_num(dev))
continue;
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
- if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+ if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
+ }
}
else /* IB port */
/* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
}
break;
+ }
case MLX4_EVENT_TYPE_CQ_ERROR:
mlx4_warn(dev, "CQ %s on CQN %06x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7e2995ecea6f..d16a4d118903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, slave);
+ int converted_port = mlx4_slave_convert_port(
+ dev, slave, vhcr->in_modifier);
+
+ if (converted_port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = converted_port;
/* Set nic_info bit to mark new fields support */
field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
- field = vhcr->in_modifier; /* phys-port = logical-port */
+ /* phys-port = logical-port */
+ field = vhcr->in_modifier -
+ find_first_bit(actv_ports.ports, dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ field = vhcr->in_modifier;
/* size is now the QP number */
size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
QUERY_FUNC_CAP_PHYS_PORT_ID);
} else if (vhcr->op_modifier == 0) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, slave);
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
- field = dev->caps.num_ports;
+ field = min(
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports),
+ dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
int err = 0;
u8 field;
u32 bmme_flags;
+ int real_port;
+ int slave_port;
+ int first_port;
+ struct mlx4_active_ports actv_ports;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+ for (slave_port = 0, real_port = first_port;
+ real_port < first_port +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ ++real_port, ++slave_port) {
+ if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
+ flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
+ else
+ flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+ }
+ for (; slave_port < dev->caps.num_ports; ++slave_port)
+ flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
+ field &= ~0x0F;
+ field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
+
/* For guests, disable timestamp */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
u16 short_field;
int err;
int admin_link_state;
+ int port = mlx4_slave_convert_port(dev, slave,
+ vhcr->in_modifier & 0xFF);
#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK 0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
+ if (port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+ (port & 0xFF);
+
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -935,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- short_field = 1; /* slave max gids */
+ if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
+ short_field = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ short_field = 1; /* slave max gids */
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_GID_OFFSET);
@@ -1585,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int port = vhcr->in_modifier;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
+ if (port < 0)
+ return -EINVAL;
+
if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
return 0;
@@ -1677,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int port = vhcr->in_modifier;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
+ if (port < 0)
+ return -EINVAL;
+
if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
(1 << port)))
return 0;
@@ -1724,6 +1779,46 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
MLX4_CMD_NATIVE);
}
+struct mlx4_config_dev {
+ __be32 update_flags;
+ __be32 rsdv1[3];
+ __be16 vxlan_udp_dport;
+ __be16 rsvd2;
+};
+
+#define MLX4_VXLAN_UDP_DPORT (1 << 0)
+
+static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+ int err;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
+
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
+{
+ struct mlx4_config_dev config_dev;
+
+ memset(&config_dev, 0, sizeof(config_dev));
+ config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
+ config_dev.vxlan_udp_dport = udp_port;
+
+ return mlx4_CONFIG_DEV(dev, &config_dev);
+}
+EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
+
+
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
@@ -1891,7 +1986,8 @@ void mlx4_opreq_action(struct work_struct *work)
err = EINVAL;
break;
}
- err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ err = mlx4_cmd(dev, 0, ((u32) err |
+ (__force u32)cpu_to_be32(token) << 16),
1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d413e60071d4..f0ae95f66ceb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,7 +41,6 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
#include <linux/kmod.h>
#include <linux/mlx4/device.h>
@@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
-static int num_vfs;
-module_param(num_vfs, int, 0444);
-MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+static uint8_t num_vfs[3] = {0, 0, 0};
+static int num_vfs_argc = 3;
+module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
+ "num_vfs=port1,port2,port1+2");
-static int probe_vf;
-module_param(probe_vf, int, 0644);
-MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+static uint8_t probe_vf[3] = {0, 0, 0};
+static int probe_vfs_argc = 3;
+module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
+ "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
@@ -1470,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
- dev->caps.gid_table_len[i] = 1;
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.gid_table_len[i] =
+ mlx4_get_slave_num_gids(dev, 0, i);
+ else
+ dev->caps.gid_table_len[i] = 1;
dev->caps.pkey_table_len[i] =
dev->phys_caps.pkey_phys_table_len[i] - 1;
}
@@ -1495,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (mlx4_log_num_mgm_entry_size == -1 &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
(!mlx4_is_mfunc(dev) ||
- (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+ (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
dev->oper_log_mgm_entry_size =
@@ -1981,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, netif_get_num_default_rss_queues() + 1,
+ min_t(int, num_online_cpus() + 1,
MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
- int err;
int i;
if (msi_x) {
@@ -1997,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- retry:
- err = pci_enable_msix(dev->pdev, entries, nreq);
- if (err) {
- /* Try again if at least 2 vectors are available */
- if (err > 1) {
- mlx4_info(dev, "Requested %d vectors, "
- "but only %d MSI-X vectors available, "
- "trying again\n", nreq, err);
- nreq = err;
- goto retry;
- }
+ nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+
+ if (nreq < 0) {
kfree(entries);
goto no_msi;
- }
-
- if (nreq <
- MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ } else if (nreq < MSIX_LEGACY_SZ +
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/* Working in legacy mode, all EQs are shared */
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
@@ -2201,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
struct mlx4_dev *dev;
int err;
int port;
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
+ {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
+ unsigned total_vfs = 0;
+ int sriov_initialized = 0;
+ unsigned int i;
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
@@ -2215,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
* per port, we must limit the number of VFs to 63 (since there are
* 128 MACs)
*/
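+ /* num_vfs/probe_vf accept one, two or three values (see the
+ * "num_vfs=port1,port2,port1+2" syntax above); param_map folds the
+ * user-supplied ordering into fixed slots: [0] VFs on port 1 only,
+ * [1] VFs on port 2 only, [2] dual-port VFs.
+ */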
- if (num_vfs >= MLX4_MAX_NUM_VF) {
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
+ total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
+ nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
+ if (nvfs[i] < 0) {
+ dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
+ i++) {
+ prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
+ if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
+ dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
+ return -EINVAL;
+ }
+ }
+ if (total_vfs >= MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
"Requested more VF's (%d) than allowed (%d)\n",
- num_vfs, MLX4_MAX_NUM_VF - 1);
+ total_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL;
}
- if (num_vfs < 0) {
- pr_err("num_vfs module parameter cannot be negative\n");
- return -EINVAL;
+ for (i = 0; i < MLX4_MAX_PORTS; i++) {
+ if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ dev_err(&pdev->dev,
+ "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ nvfs[i] + nvfs[2], i + 1,
+ MLX4_MAX_NUM_VF_P_PORT - 1);
+ return -EINVAL;
+ }
}
+
+
/*
* Check for BARs.
*/
@@ -2300,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
* requested to probe them. */
- if (num_vfs && extended_func_num(pdev) > probe_vf) {
- mlx4_warn(dev, "Skipping virtual function:%d\n",
- extended_func_num(pdev));
- err = -ENODEV;
- goto err_free_dev;
+ if (total_vfs) {
+ unsigned vfs_offset = 0;
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset += nvfs[i], i++)
+ ;
+ if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ if ((extended_func_num(pdev) - vfs_offset)
+ > prb_vf[i]) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
}
mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
dev->flags |= MLX4_FLAG_SLAVE;
@@ -2324,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
}
}
- if (num_vfs) {
- mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
-
- atomic_inc(&pf_loading);
- err = pci_enable_sriov(pdev, num_vfs);
- atomic_dec(&pf_loading);
-
- if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
- err);
+ if (total_vfs) {
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
+ total_vfs);
+ dev->dev_vfs = kzalloc(
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
+ if (NULL == dev->dev_vfs) {
+ mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
} else {
- mlx4_warn(dev, "Running in master mode\n");
- dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
- dev->num_vfs = num_vfs;
+ atomic_inc(&pf_loading);
+ err = pci_enable_sriov(pdev, total_vfs);
+ atomic_dec(&pf_loading);
+ if (err) {
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ err);
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = total_vfs;
+ sriov_initialized = 1;
+ }
}
}
@@ -2404,12 +2450,37 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
+ unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
}
+ if (sriov_initialized) {
+ int ib_ports = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_ports++;
+
+ if (ib_ports &&
+ (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
+ mlx4_err(dev,
+ "Invalid syntax of num_vfs/probe_vfs "
+ "with IB port. Single port VFs syntax"
+ " is only supported when all ports "
+ "are configured as ethernet\n");
+ goto err_close;
+ }
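+ /* Record each VF's port span: nvfs[0]/nvfs[1] are single-port VFs
+ * pinned to port 1/2 respectively, nvfs[2] are dual-port VFs.
+ */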
+ for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
+ unsigned j;
+ for (j = 0; j < nvfs[i]; ++sum, ++j) {
+ dev->dev_vfs[sum].min_port =
+ i < 2 ? i + 1 : 1;
+ dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
+ dev->caps.num_ports;
+ }
+ }
+ }
}
err = mlx4_alloc_eq_table(dev);
@@ -2517,6 +2588,8 @@ err_rel_own:
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+ kfree(priv->dev.dev_vfs);
+
err_free_dev:
kfree(priv);
@@ -2603,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
kfree(dev->caps.qp0_proxy);
kfree(dev->caps.qp1_tunnel);
kfree(dev->caps.qp1_proxy);
+ kfree(dev->dev_vfs);
kfree(priv);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index db7dc0b6667d..80ccb4edf825 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
u32 qpn = (u32) vhcr->in_param & 0xffffffff;
- u8 port = vhcr->in_param >> 62;
+ int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
enum mlx4_steer_type steer = vhcr->in_modifier;
+ if (port < 0)
+ return -EINVAL;
+
/* Promiscuous unicast is not allowed in mfunc */
if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7aec6c833973..cf8be41abb36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -788,6 +788,10 @@ enum {
MLX4_USE_RR = 1,
};
+struct mlx4_roce_gid_entry {
+ u8 raw[16];
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -834,6 +838,7 @@ struct mlx4_priv {
int fs_hash_mode;
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
__be64 slave_node_guids[MLX4_MFUNC_MAX];
+ struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
atomic_t opreq_count;
struct work_struct opreq_task;
@@ -1242,11 +1247,6 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1282,4 +1282,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
+/* Returns the VF index of slave */
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c87a34e..7a733c287744 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
+/* Constants for TX flow */
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
+ MIN_PKT_LEN = 17,
+};
+
/*
* Configurables
*/
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
int hwtstamp_tx_type;
+ int inline_thold;
};
struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
+ int inline_thold;
};
struct mlx4_en_profile {
@@ -548,6 +559,10 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
+#ifdef CONFIG_MLX4_EN_VXLAN
+ struct work_struct vxlan_add_task;
+ struct work_struct vxlan_del_task;
+#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
@@ -574,6 +589,7 @@ struct mlx4_en_priv {
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
u64 tunnel_reg_id;
+ __be16 vxlan_port;
};
enum mlx4_en_wol {
@@ -737,7 +753,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
@@ -786,7 +802,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a58bcbf1b806..cfcad26ed40f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
mlx4_free_cmd_mailbox(dev, outmailbox);
return err;
}
+static struct mlx4_roce_gid_entry zgid_entry;
+
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
+{
+ int vfs;
+ int slave_gid = slave;
+ unsigned i;
+ struct mlx4_slaves_pport slaves_pport;
+ struct mlx4_active_ports actv_ports;
+ unsigned max_port_p_one;
+
+ if (slave == 0)
+ return MLX4_ROCE_PF_GIDS;
+
+ /* Slave is a VF */
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ if (i == port)
+ continue;
+ slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
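+ /* Spread the non-PF gids evenly across the VFs active on this port,
+ * giving the remainder to the lowest-numbered VFs. For example, if
+ * MLX4_ROCE_MAX_GIDS were 128 and MLX4_ROCE_PF_GIDS 16, five VFs
+ * would get 23, 23, 22, 22 and 22 entries.
+ */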
+ if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
+ return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
+ return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
+}
+
+int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
+{
+ int gids;
+ unsigned i;
+ int slave_gid = slave;
+ int vfs;
+
+ struct mlx4_slaves_pport slaves_pport;
+ struct mlx4_active_ports actv_ports;
+ unsigned max_port_p_one;
+
+ if (slave == 0)
+ return 0;
+
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ actv_ports = mlx4_get_active_ports(dev, slave);
+ max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ if (i == port)
+ continue;
+ slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+ vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
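+ /* The base index follows the same split; continuing the example above,
+ * the third of five VFs would start at index 16 + 23 + 23 = 62.
+ */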
+ if (slave_gid <= gids % vfs)
+ return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
+
+ return MLX4_ROCE_PF_GIDS + (gids % vfs) +
+ ((gids / vfs) * (slave_gid - 1));
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
u8 op_mod, struct mlx4_cmd_mailbox *inbox)
@@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
struct mlx4_slave_state *slave_st = &master->slave_state[slave];
struct mlx4_set_port_rqp_calc_context *qpn_context;
struct mlx4_set_port_general_context *gen_context;
+ struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
int reset_qkey_viols;
int port;
int is_eth;
+ int num_gids;
+ int base;
u32 in_modifier;
u32 promisc;
u16 mtu, prev_mtu;
int err;
- int i;
+ int i, j;
+ int offset;
__be32 agg_cap_mask;
__be32 slave_cap_mask;
__be32 new_cap_mask;
@@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
/* Slaves cannot perform SET_PORT operations except changing MTU */
if (is_eth) {
if (slave != dev->caps.function &&
- in_modifier != MLX4_SET_PORT_GENERAL) {
+ in_modifier != MLX4_SET_PORT_GENERAL &&
+ in_modifier != MLX4_SET_PORT_GID_TABLE) {
mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
slave);
return -EINVAL;
@@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
break;
+ case MLX4_SET_PORT_GID_TABLE:
+ /* The mailbox carries multiple entries (one per gid the guest owns),
+ * so loop over the number of gids this guest has.
+ * 1. Check that the gids passed by the slave contain no duplicates.
+ */
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ base = mlx4_get_base_gid_ix(dev, slave, port);
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
+ if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+ sizeof(zgid_entry)))
+ continue;
+ gid_entry_mb1 = gid_entry_mbox + 1;
+ for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
+ if (!memcmp(gid_entry_mb1->raw,
+ zgid_entry.raw, sizeof(zgid_entry)))
+ continue;
+ if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
+ sizeof(gid_entry_mbox->raw))) {
+ /* found duplicate */
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* 2. Check that the new gids do not duplicate any OTHER
+ * entries already in the port GID table
+ */
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+ if (i >= base && i < base + num_gids)
+ continue; /* don't compare to slave's current gids */
+ gid_entry_tbl = &priv->roce_gids[port - 1][i];
+ if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
+ continue;
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
+ if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+ sizeof(zgid_entry)))
+ continue;
+ if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
+ sizeof(gid_entry_tbl->raw))) {
+ /* found duplicate */
+ mlx4_warn(dev, "requested gid entry for slave:%d "
+ "is a duplicate of gid at index %d\n",
+ slave, i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* insert slave GIDs with memcpy, starting at slave's base index */
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
+ memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+
+ /* Now, copy roce port gids table to current mailbox for passing to FW */
+ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
+ memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
+
+ break;
}
return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ int port = mlx4_slave_convert_port(
+ dev, slave, vhcr->in_modifier & 0xFF);
+
+ if (port < 0)
+ return -EINVAL;
+
+ vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+ (port & 0xFF);
+
return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
vhcr->op_modifier, inbox);
}
@@ -835,7 +988,7 @@ struct mlx4_set_port_vxlan_context {
u8 steering;
};
-int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
{
int err;
u32 in_mod;
@@ -849,7 +1002,8 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
memset(context, 0, sizeof(*context));
context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
- context->enable_flags = VXLAN_ENABLE;
+ if (enable)
+ context->enable_flags = VXLAN_ENABLE;
context->steering = steering;
in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
@@ -927,3 +1081,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);
+
+int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
+ int *slave_id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, found_ix = -1;
+ int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+ struct mlx4_slaves_pport slaves_pport;
+ unsigned num_vfs;
+ int slave_gid;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EINVAL;
+
+ slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+ num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+
+ for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+ if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+ found_ix = i;
+ break;
+ }
+ }
+
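+ /* Map the matching table index back to a slave number by reversing the
+ * per-VF partitioning of mlx4_get_base_gid_ix(), then translate that
+ * per-port ordinal into a global slave number by accounting for slaves
+ * that are active only on other ports.
+ */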
+ if (found_ix >= 0) {
+ if (found_ix < MLX4_ROCE_PF_GIDS)
+ slave_gid = 0;
+ else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
+ (vf_gids / num_vfs + 1))
+ slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
+ (vf_gids / num_vfs + 1)) + 1;
+ else
+ slave_gid =
+ ((found_ix - MLX4_ROCE_PF_GIDS -
+ ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
+ (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
+
+ if (slave_gid) {
+ struct mlx4_active_ports exclusive_ports;
+ struct mlx4_active_ports actv_ports;
+ struct mlx4_slaves_pport slaves_pport_actv;
+ unsigned max_port_p_one;
+ int num_slaves_before = 1;
+
+ for (i = 1; i < port; i++) {
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(i, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ num_slaves_before += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+
+ if (slave_gid < num_slaves_before) {
+ bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+ set_bit(port - 1, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1) -
+ num_slaves_before;
+ }
+ actv_ports = mlx4_get_active_ports(dev, slave_gid);
+ max_port_p_one = find_first_bit(
+ actv_ports.ports, dev->caps.num_ports) +
+ bitmap_weight(actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ for (i = 1; i < max_port_p_one; i++) {
+ if (i == port)
+ continue;
+ bitmap_zero(exclusive_ports.ports,
+ dev->caps.num_ports);
+ set_bit(i - 1, exclusive_ports.ports);
+ slaves_pport_actv =
+ mlx4_phys_to_slaves_pport_actv(
+ dev, &exclusive_ports);
+ slave_gid += bitmap_weight(
+ slaves_pport_actv.slaves,
+ dev->num_vfs + 1);
+ }
+ }
+ *slave_id = slave_gid;
+ }
+
+ return (found_ix >= 0) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
+
+int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
+ u8 *gid)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!mlx4_is_master(dev))
+ return -EINVAL;
+
+ memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 57428a0cb9dd..3b5f53ef29b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -52,6 +52,8 @@
struct mac_res {
struct list_head list;
u64 mac;
+ int ref_count;
+ u8 smac_index;
u8 port;
};
@@ -219,6 +221,11 @@ struct res_fs_rule {
int qpn;
};
+static int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;
@@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->num_vfs + 1; t++) {
+ struct mlx4_active_ports actv_ports =
+ mlx4_get_active_ports(dev, t);
switch (i) {
case RES_QP:
initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_MAC:
if (t == mlx4_master_func_num(dev)) {
- res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+ int max_vfs_pport = 0;
+ /* Calculate the max number of VFs on any single port. */
+ for (j = 0; j < dev->caps.num_ports;
+ j++) {
+ struct mlx4_slaves_pport slaves_pport =
+ mlx4_phys_to_slaves_pport(dev, j + 1);
+ unsigned current_slaves =
+ bitmap_weight(slaves_pport.slaves,
+ dev->caps.num_ports) - 1;
+ if (max_vfs_pport < current_slaves)
+ max_vfs_pport =
+ current_slaves;
+ }
+ res_alloc->quota[t] =
+ MLX4_MAX_MAC_NUM -
+ 2 * max_vfs_pport;
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
- res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+ res_alloc->res_port_free[j] =
+ MLX4_MAX_MAC_NUM;
} else {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
@@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
}
if (i == RES_MAC || i == RES_VLAN) {
- for (j = 0; j < MLX4_MAX_PORTS; j++)
- res_alloc->res_port_rsvd[j] +=
- res_alloc->guaranteed[t];
+ for (j = 0; j < dev->caps.num_ports; j++)
+ if (test_bit(j, actv_ports.ports))
+ res_alloc->res_port_rsvd[j] +=
+ res_alloc->guaranteed[t];
} else {
res_alloc->res_reserved += res_alloc->guaranteed[t];
}
@@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+ int port;
- if (MLX4_QP_ST_UD == ts)
- qp_ctx->pri_path.mgid_index = 0x80 | slave;
-
- if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- qp_ctx->pri_path.mgid_index = slave & 0x7F;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ if (MLX4_QP_ST_UD == ts) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port))
+ qp_ctx->pri_path.mgid_index =
+ mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
+ else
+ qp_ctx->pri_path.mgid_index = slave | 0x80;
+
+ } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->pri_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->pri_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->pri_path.mgid_index = slave & 0x7F;
+ }
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port)) {
+ qp_ctx->alt_path.mgid_index +=
+ mlx4_get_base_gid_ix(dev, slave, port);
+ qp_ctx->alt_path.mgid_index &= 0x7f;
+ } else {
+ qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ }
+ }
}
}
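
The update_gid() hunk above, and several wrappers further down, recover the physical port from bit 6 of the path's sched_queue byte. A minimal sketch of that decoding, using a hypothetical helper name and based only on the convention visible in this patch:

static inline int example_sched_queue_to_port(u8 sched_queue)
{
	/* bit 6 selects the physical port; ports are numbered 1 and 2 */
	return ((sched_queue >> 6) & 1) + 1;
}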
@@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
- u32 qp_type;
int port;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
@@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
- qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
- if (MLX4_QP_ST_RC == qp_type ||
- (MLX4_QP_ST_UD == qp_type &&
- !mlx4_is_qp_reserved(dev, qpn)))
- return -EINVAL;
-
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
*/
@@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err;
}
-static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
+ u8 smac_index, u64 *mac)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->smac_index == smac_index && res->port == (u8) port) {
+ *mac = res->mac;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct mac_res *res;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+			/* MAC found, update its ref count */
+ ++res->ref_count;
+ return 0;
+ }
+ }
if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
return -EINVAL;
@@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
}
res->mac = mac;
res->port = (u8) port;
+ res->smac_index = smac_index;
+ res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);
return 0;
@@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
- list_del(&res->list);
- mlx4_release_resource(dev, slave, RES_MAC, 1, port);
- kfree(res);
+ if (!--res->ref_count) {
+ list_del(&res->list);
+ mlx4_release_resource(dev, slave, RES_MAC, 1, port);
+ kfree(res);
+ }
break;
}
}
@@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
+ int i;
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
- __mlx4_unregister_mac(dev, res->port, res->mac);
+		/* dereference the MAC as many times as the slave referenced it */
+ for (i = 0; i < res->ref_count; i++)
+ __mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
@@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
int err = -EINVAL;
int port;
u64 mac;
+ u8 smac_index;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
if (err >= 0) {
+ smac_index = err;
set_param_l(out_param, err);
err = 0;
}
if (!err) {
- err = mac_add_to_slave(dev, slave, mac, port);
+ err = mac_add_to_slave(dev, slave, mac, port, smac_index);
if (err)
__mlx4_unregister_mac(dev, port, mac);
}
@@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (!port || op != RES_OP_RESERVE_AND_MAP)
return -EINVAL;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
if (!in_port && port > 0 && port <= dev->caps.num_ports) {
slave_state[slave].old_vlan_api = true;
@@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) {
case RES_OP_RESERVE_AND_MAP:
port = !in_port ? get_param_l(out_param) : in_port;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
@@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err = 0;
+ port = mlx4_slave_convert_port(
+ dev, slave, port);
+
+ if (port < 0)
+ return -EINVAL;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)
@@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
u32 qp_type;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
+ int port;
+ int num_gids;
qp_ctx = inbox->buf + 8;
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
@@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
switch (qp_type) {
case MLX4_QP_ST_RC:
+ case MLX4_QP_ST_XRC:
case MLX4_QP_ST_UC:
switch (transition) {
case QP_TRANS_INIT2RTR:
@@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
if (slave != mlx4_master_func_num(dev))
- /* slaves have only gid index 0 */
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
- if (qp_ctx->pri_path.mgid_index)
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->pri_path.mgid_index >= num_gids)
return -EINVAL;
- if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
- if (qp_ctx->alt_path.mgid_index)
+ }
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+ num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+ else
+ num_gids = 1;
+ if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
+ }
break;
default:
break;
@@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
+ u8 pri_sched_queue;
+ int port = mlx4_slave_convert_port(
+ dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
+
+ if (port < 0)
+ return -EINVAL;
+
+ pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
+ ((port & 1) << 6);
+
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
+ mlx4_is_eth(dev, port + 1)) {
+ qpc->pri_path.sched_queue = pri_sched_queue;
+ }
+
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+ port = mlx4_slave_convert_port(
+ dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+ + 1) - 1;
+ if (port < 0)
+ return -EINVAL;
+ qpc->alt_path.sched_queue =
+ (qpc->alt_path.sched_queue & ~(1 << 6)) |
+ (port & 1) << 6;
+ }
+ return 0;
+}
+
+static int roce_verify_mac(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox)
+{
+ u64 mac;
+ int port;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ u8 sched = *(u8 *)(inbox->buf + 64);
+ u8 smac_ix;
+
+ port = (sched >> 6 & 1) + 1;
+ if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
+ smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
+ if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
+ return -ENOENT;
+ }
+ return 0;
+}
+
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
u8 orig_vlan_index = qpc->pri_path.vlan_index;
u8 orig_feup = qpc->pri_path.feup;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
return err;
+ if (roce_verify_mac(dev, slave, qpc, inbox))
+ return -EINVAL;
+
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
@@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
@@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
@@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
+ int err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
@@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
@@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
+ err = adjust_qp_sched_queue(dev, slave, context, inbox);
+ if (err)
+ return err;
err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
@@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
return err;
}
-static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_loopback, enum mlx4_protocol prot,
+static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
+ u8 gid[16], int block_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_loopback, prot,
reg_id);
+ }
case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH) {
+ int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (port < 0)
+ return port;
+ gid[5] = port;
+ }
return mlx4_qp_attach_common(dev, qp, gid,
block_loopback, prot, type);
default:
@@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
}
-static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot, enum mlx4_steer_type type,
- u64 reg_id)
+static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot,
+ enum mlx4_steer_type type, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qp.qpn = qpn;
if (attach) {
- err = qp_attach(dev, &qp, gid, block_loopback, prot,
+ err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
type, &reg_id);
if (err) {
pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (ctrl->port <= 0)
+ return -EINVAL;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
@@ -3816,16 +4015,6 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
- struct mlx4_vhcr *vhcr,
- struct mlx4_cmd_mailbox *inbox,
- struct mlx4_cmd_mailbox *outbox,
- struct mlx4_cmd_info *cmd)
-{
- return -EPERM;
-}
-
-
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23b7e2d35a93..77ac95f052da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &dev->priv.eq_table;
int num_eqs = 1 << dev->caps.log_max_eq;
int nvec;
- int err;
int i;
nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
for (i = 0; i < nvec; i++)
table->msix_arr[i].entry = i;
-retry:
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
- err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
- if (err <= 0) {
- return err;
- } else if (err > 2) {
- nvec = err;
- goto retry;
- }
+ nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ MLX5_EQ_VEC_COMP_BASE, nvec);
+ if (nvec < 0)
+ return nvec;
- mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
}
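
The mlx5 hunk above is the first of several drivers in this section that replace the old pci_enable_msix() retry loop with pci_enable_msix_range(). A minimal sketch of the new idiom, with a hypothetical helper not taken from any driver here: on success the call returns the number of vectors granted, somewhere in [minvec, maxvec]; on failure it returns a negative errno, so no retry loop is needed.

static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int maxvec)
{
	int i, nvec;

	for (i = 0; i < maxvec; i++)
		entries[i].entry = i;

	/* ask for up to maxvec vectors, accept as few as one */
	nvec = pci_enable_msix_range(pdev, entries, 1, maxvec);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or INTx */

	return nvec;		/* number of vectors actually granted */
}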
@@ -537,7 +531,6 @@ static int __init init(void)
return 0;
- mlx5_health_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ce84dc289c8f..14ac0e2bc09f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
skb->csum = old->csum;
skb_set_network_header(skb, ETH_HLEN);
- dev_kfree_skb(old);
+ dev_consume_skb_any(old);
}
/**
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7e8ba3..130f6b204efa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
- status =
- pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- mgp->msix_enabled = 1;
- } else {
+ status = pci_enable_msix_range(pdev, mgp->msix_vectors,
+ mgp->num_slices, mgp->num_slices);
+ if (status < 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
+ mgp->msix_enabled = 1;
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
- goto disable_msix;
+ goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
- /* make sure it is a power of two */
- while (!is_power_of_2(mgp->num_slices))
- mgp->num_slices--;
+ mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
if (mgp->num_slices == 1)
- goto disable_msix;
- status = pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- pci_disable_msix(pdev);
+ goto no_msix;
+ status = pci_enable_msix_range(pdev,
+ mgp->msix_vectors,
+ mgp->num_slices,
+ mgp->num_slices);
+ if (status < 0)
+ goto no_msix;
+
+ pci_disable_msix(pdev);
+
+ if (status == mgp->num_slices) {
if (old_allocated)
kfree(old_fw);
return;
- }
- if (status > 0)
+ } else {
mgp->num_slices = status;
- else
- goto disable_msix;
+ }
}
-disable_msix:
+no_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd0b2c7..a2844ff322c4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
+ if (budget <= 0)
+ return napi_pkts;
+
get_info = ring_data->rx_curr_get_info;
get_block = get_info.block_index;
memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
@@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
writeq(rx_mat, &bar0->rx_mat);
readq(&bar0->rx_mat);
- ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+ ret = pci_enable_msix_range(nic->pdev, nic->entries,
+ nic->num_entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret) {
+ if (ret < 0) {
DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
kfree(nic->entries);
swstats->mem_freed += nic->num_entries *
@@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
if (!is_s2io_card_up(sp)) {
DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
s2io_stop_tx_queue(sp, fifo->fifo_no);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -4240,7 +4244,7 @@ pci_map_failed:
swstats->pci_map_fail_cnt++;
s2io_stop_tx_queue(sp, fifo->fifo_no);
swstats->mem_freed += skb->truesize;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e8698e630..d107bcbb8543 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
+ if (ring->budget <= 0)
+ goto out;
+
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
if (first_dtr)
vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
+out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...",
__func__, __LINE__);
@@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len <= 0)) {
vxge_debug_tx(VXGE_ERR,
"%s: Buffer has no data..", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
"%s: vdev not initialized", dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
vxge_debug_tx(VXGE_ERR,
"%s: Failed to store the mac address",
dev->name);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
@@ -986,7 +990,7 @@ _exit1:
vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
netif_tx_stop_queue(fifo->txq);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2349,12 +2353,18 @@ start:
vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret > 0) {
+ ret = pci_enable_msix_range(vdev->pdev,
+ vdev->entries, 3, vdev->intr_cnt);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ } else if (ret < vdev->intr_cnt) {
+ pci_disable_msix(vdev->pdev);
+
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+ if (max_config_vpath != VXGE_USE_DEFAULT) {
ret = -ENODEV;
goto enable_msix_failed;
}
@@ -2368,9 +2378,6 @@ start:
vxge_close_vpaths(vdev, temp);
vdev->no_of_vpath = temp;
goto start;
- } else if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
}
return 0;
@@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
u64 packets, bytes, multicast;
do {
- start = u64_stats_fetch_begin_bh(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
packets = rxstats->rx_frms;
multicast = rxstats->rx_mcast;
bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
net_stats->rx_packets += packets;
net_stats->rx_bytes += bytes;
@@ -3146,11 +3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_dropped += rxstats->rx_dropped;
do {
- start = u64_stats_fetch_begin_bh(&txstats->syncp);
+ start = u64_stats_fetch_begin_irq(&txstats->syncp);
packets = txstats->tx_frms;
bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
net_stats->tx_packets += packets;
net_stats->tx_bytes += bytes;
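
The vxge hunks above and the forcedeth hunks below switch the 64-bit stats readers from the _bh to the _irq flavour of the u64_stats helpers. A minimal sketch of the reader loop in its new form, using a hypothetical stats structure:

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read_stats(const struct example_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}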
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97fe67f2..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
/* software stats */
do {
- syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
storage->rx_packets = np->stat_rx_packets;
storage->rx_bytes = np->stat_rx_bytes;
storage->rx_dropped = np->stat_rx_dropped;
storage->rx_missed_errors = np->stat_rx_missed_errors;
- } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
do {
- syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
storage->tx_packets = np->stat_tx_packets;
storage->tx_bytes = np->stat_tx_bytes;
storage->tx_dropped = np->stat_tx_dropped;
- } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+ } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (pci_dma_mapping_error(np->pci_dev,
np->put_tx_ctx->dma)) {
/* on DMA mapping error - drop the packet */
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
tmp_tx_ctx = np->first_tx_ctx;
} while (tmp_tx_ctx != np->put_tx_ctx);
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
np->put_tx_ctx = start_tx_ctx;
u64_stats_update_begin(&np->swstats_tx_syncp);
np->stat_tx_dropped++;
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
+ int ret;
int i;
irqreturn_t (*handler)(int foo, void *data);
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
- if (ret == 0) {
+ ret = pci_enable_msix_range(np->pci_dev,
+ np->msi_x_entry,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK);
+ if (ret > 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
- nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
- nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
- nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+ handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
+ return 0;
}
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if (np->msi_flags & NV_MSI_CAPABLE) {
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
+ return 0;
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
- goto out_err;
- }
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+ goto out_err;
return 0;
out_free_tx:
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 464e91058c81..73e66838cfef 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -120,10 +120,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
static void pch_gbe_set_multi(struct net_device *netdev);
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
@@ -131,7 +127,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
u16 *hi, *id;
u32 lo;
- if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
+ if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
return 0;
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -2635,11 +2631,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- dev_err(&pdev->dev, "Bad ptp filter\n");
- ret = -EINVAL;
- goto err_free_netdev;
- }
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f59e6be4a66e..c14bd3116e45 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -56,6 +56,16 @@ config QLCNIC_DCB
mode of DCB is supported. PG and PFC values are related only
to Tx.
+config QLCNIC_VXLAN
+ bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
+ default n
+ depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
+ ---help---
+	  This enables hardware offload support for the VXLAN protocol on
+	  QLogic's 84XX series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849dea32b1..f09c35d669b3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
if (adapter->msix_supported) {
netxen_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f19f81cde134..f31bb5e9d8a9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 55
-#define QLCNIC_LINUX_VERSIONID "5.3.55"
+#define _QLCNIC_LINUX_SUBVERSION 57
+#define QLCNIC_LINUX_VERSIONID "5.3.57"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -169,11 +169,20 @@ struct cmd_desc_type0 {
__le64 addr_buffer2;
- __le16 reference_handle;
+ __le16 encap_descr; /* 15:10 offset of outer L3 header,
+ * 9:6 number of 32bit words in outer L3 header,
+ * 5 offload outer L4 checksum,
+ * 4 offload outer L3 checksum,
+ * 3 Inner L4 type, TCP=0, UDP=1,
+ * 2 Inner L3 type, IPv4=0, IPv6=1,
+				 * 1 Outer L3 type, IPv4=0, IPv6=1,
+ * 0 type of encapsulation, GRE=0, VXLAN=1
+ */
__le16 mss;
u8 port_ctxid; /* 7:4 ctxid 3:0 port */
- u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
- __le16 conn_id; /* IPSec offoad only */
+ u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u8 outer_hdr_length; /* Encapsulation only */
+ u8 rsvd1;
__le64 addr_buffer3;
__le64 addr_buffer1;
@@ -183,7 +192,9 @@ struct cmd_desc_type0 {
__le64 addr_buffer4;
u8 eth_addr[ETH_ALEN];
- __le16 vlan_TCI;
+ __le16 vlan_TCI; /* In case of encapsulation,
+ * this is for outer VLAN
+ */
} __attribute__ ((aligned(64)));
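
The encap_descr layout documented above is what qlcnic_tx_encap_pkt() fills in later in this series. A worked example, for illustration only, of a hypothetical VXLAN frame with an outer IPv4 header (20 bytes, i.e. five 32-bit words) starting at offset 14, inner TCP over IPv4, and both outer checksums offloaded:

/* Value composition mirroring the documented bit layout (illustration only):
 *   bit 0      VXLAN encapsulation                0x0001
 *   bit 4      offload outer L3 checksum          0x0010
 *   bit 5      offload outer L4 checksum          0x0020
 *   bits 9:6   outer L3 header words  (5 << 6)    0x0140
 *   bits 15:10 outer L3 header offset (14 << 10)  0x3800
 *                                          total  0x3971
 * The inner L4/L3 and outer L3 type bits stay zero for TCP over IPv4.
 */
__le16 example_encap_descr = cpu_to_le16(0x3971);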
@@ -394,7 +405,7 @@ struct qlcnic_nic_intr_coalesce {
u32 timer_out;
};
-struct qlcnic_dump_template_hdr {
+struct qlcnic_83xx_dump_template_hdr {
u32 type;
u32 offset;
u32 size;
@@ -411,15 +422,42 @@ struct qlcnic_dump_template_hdr {
u32 rsvd[0];
};
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[0];
+};
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
void *data; /* dump data area */
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ void *tmpl_hdr;
dma_addr_t phys_addr;
void *dma_buffer;
bool use_pex_dma;
+	/* Read-only elements common to the 82xx and 83xx template
+	 * headers. Update these values immediately after reading the
+	 * template header from firmware.
+	 */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
};
/*
@@ -497,6 +535,7 @@ struct qlcnic_hardware_context {
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
+ u16 vxlan_port;
};
struct qlcnic_adapter_stats {
@@ -511,6 +550,9 @@ struct qlcnic_adapter_stats {
u64 txbytes;
u64 lrobytes;
u64 lso_frames;
+ u64 encap_lso_frames;
+ u64 encap_tx_csummed;
+ u64 encap_rx_csummed;
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
@@ -872,6 +914,10 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
@@ -966,6 +1012,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
#define QLCNIC_TSS_RSS 0x80000
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLCNIC_ADD_VXLAN_PORT 0x100000
+#define QLCNIC_DEL_VXLAN_PORT 0x200000
+#endif
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1769,10 +1820,28 @@ struct qlcnic_hardware_ops {
struct qlcnic_host_tx_ring *);
void (*disable_tx_intr) (struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
return adapter->nic_ops->start_firmware(adapter);
@@ -2007,6 +2076,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->read_phys_port_id(adapter);
}
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 27c4f131863b..b7cffb46a75d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
{QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
{QLCNIC_CMD_IDC_ACK, 5, 1},
- {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
{QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
{QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
{QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
@@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+ {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.disable_sds_intr = qlcnic_83xx_disable_sds_intr,
.enable_tx_intr = qlcnic_83xx_enable_tx_intr,
.disable_tx_intr = qlcnic_83xx_disable_tx_intr,
-
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f92485ca21d1..88d809c35633 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
struct qlc_83xx_idc {
int (*state_entry) (struct qlcnic_adapter *);
u64 sec_counter;
@@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs {
};
/* Initialize/Stop NIC command bit definitions */
-#define QLC_REGISTER_DCB_AEN BIT_1
#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
#define QLC_INIT_FW_RESOURCES BIT_31
/* 83xx funcitons */
@@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 90a2dda351ec..b48737dcd3c5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,10 +1020,99 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
return 0;
}
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+ u16 port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+ cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+ QLC_83XX_SET_VXLAN_UDP_DPORT |
+ QLC_83XX_VXLAN_UDP_DPORT(port);
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to set VXLAN port %d in adapter\n",
+ port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+ bool state)
+{
+ u16 vxlan_port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_INGRESS_ENCAP);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+ QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to %s VXLAN parsing for port %d\n",
+ state ? "enable" : "disable", vxlan_port);
+ else
+ netdev_info(adapter->netdev,
+ "%s VXLAN parsing for port %d\n",
+ state ? "Enabled" : "Disabled", vxlan_port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+#endif
+
static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
{
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
+
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_port(adapter))
+ return;
+
+ if (qlcnic_set_vxlan_parsing(adapter, true))
+ return;
+
+ adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+ } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_parsing(adapter, false))
+ return;
+
+ adapter->ahw->vxlan_port = 0;
+ adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+ }
+#endif
}
/**
@@ -1301,7 +1390,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
addr = (u64)dest;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)p_cache, size / 16);
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index acee1a5d80c6..5bacf5210aed 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+ QLC_OFF(stats.encap_lso_frames)},
+ {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+ QLC_OFF(stats.encap_tx_csummed)},
+ {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+ QLC_OFF(stats.encap_rx_csummed)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
@@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
}
if (fw_dump->clr)
- dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
else
dump->len = 0;
if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
dump->version = adapter->fw_version;
return 0;
@@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
netdev_info(netdev, "Dump not available\n");
return -EINVAL;
}
+
/* Copy template header first */
- copy_sz = fw_dump->tmpl_hdr->size;
- hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
/* Copy captured dump data */
memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
dump->len = copy_sz + fw_dump->size;
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
/* Free dump area once data has been captured */
vfree(fw_dump->data);
@@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
return -EOPNOTSUPP;
}
- fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ fw_dump->cap_mask = mask;
+
+	/* Store the new capture mask in the template header as well */
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 03d18a0be6ce..9f3adf4e70b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int timeout = 0;
- int err = 0;
- u32 done = 0;
+ int timeout = 0, err = 0, done = 0;
while (!done) {
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
- dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock; holdby=%d\n",
- sem,
- id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock", sem);
+ }
return -EIO;
}
msleep(1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 63d75617d445..cbe2399c30a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -98,6 +98,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LINK_EVENT 0x48
#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
#define QLCNIC_CMD_IDC_ACK 0x63
@@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
int qlcnic_82xx_resume(struct qlcnic_adapter *);
void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
#endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 54ebf300332a..173b3d12991f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -13,16 +13,19 @@
#include "qlcnic.h"
-#define TX_ETHER_PKT 0x01
-#define TX_TCP_PKT 0x02
-#define TX_UDP_PKT 0x03
-#define TX_IP_PKT 0x04
-#define TX_TCP_LSO 0x05
-#define TX_TCP_LSO6 0x06
-#define TX_TCPV6_PKT 0x0b
-#define TX_UDPV6_PKT 0x0c
-#define FLAGS_VLAN_TAGGED 0x10
-#define FLAGS_VLAN_OOB 0x40
+#define QLCNIC_TX_ETHER_PKT 0x01
+#define QLCNIC_TX_TCP_PKT 0x02
+#define QLCNIC_TX_UDP_PKT 0x03
+#define QLCNIC_TX_IP_PKT 0x04
+#define QLCNIC_TX_TCP_LSO 0x05
+#define QLCNIC_TX_TCP_LSO6 0x06
+#define QLCNIC_TX_ENCAP_PKT 0x07
+#define QLCNIC_TX_ENCAP_LSO 0x08
+#define QLCNIC_TX_TCPV6_PKT 0x0b
+#define QLCNIC_TX_UDPV6_PKT 0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
+#define QLCNIC_FLAGS_VLAN_OOB 0x40
#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
(cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
spin_unlock(&adapter->mac_learn_lock);
}
+#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+ int copied, copy_len, descr_size;
+ u32 producer = tx_ring->producer;
+ struct cmd_desc_type0 *hwdesc;
+ u16 flags = 0, encap_descr = 0;
+
+ opcode = QLCNIC_TX_ETHER_PKT;
+ encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+ if (skb_is_gso(skb)) {
+ inner_hdr_len = skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) -
+ skb_inner_mac_header(skb);
+
+ /* VXLAN header size = 8 */
+ outer_hdr_len = skb_transport_offset(skb) + 8 +
+ sizeof(struct udphdr);
+ first_desc->outer_hdr_length = outer_hdr_len;
+ total_hdr_len = inner_hdr_len + outer_hdr_len;
+ encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+ QLCNIC_ENCAP_DO_L4_CSUM;
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = inner_hdr_len;
+
+		/* Copy the inner and outer headers into the Tx descriptor(s).
+		 * If total_hdr_len > sizeof(struct cmd_desc_type0), use
+		 * multiple descriptors.
+		 */
+ copied = 0;
+ descr_size = (int)sizeof(struct cmd_desc_type0);
+ while (copied < total_hdr_len) {
+ copy_len = min(descr_size, (total_hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc,
+ copy_len);
+ copied += copy_len;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+
+		/* Make sure the updated tx_ring->producer is visible
+		 * to qlcnic_tx_avail()
+		 */
+ smp_mb();
+ adapter->stats.encap_lso_frames++;
+
+ opcode = QLCNIC_TX_ENCAP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ } else {
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ }
+
+ adapter->stats.encap_tx_csummed++;
+ opcode = QLCNIC_TX_ENCAP_PKT;
+ }
+
+ /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+ if (ip_hdr(skb)->version == 6)
+ encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+	/* outer IP header's size in 32-bit words */
+ encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+ /* outer IP header offset */
+ encap_descr |= skb_network_offset(skb) << 10;
+ first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+ first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+ skb->data;
+ first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
struct qlcnic_host_tx_ring *tx_ring)
@@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
- flags = FLAGS_VLAN_TAGGED;
+ flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
}
if (unlikely(adapter->tx_pvid)) {
@@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
- flags = FLAGS_VLAN_OOB;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = adapter->tx_pvid;
}
set_flags:
@@ -402,25 +500,26 @@ set_flags:
flags |= BIT_0;
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
- opcode = TX_ETHER_PKT;
+ opcode = QLCNIC_TX_ETHER_PKT;
if (skb_is_gso(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_desc->total_hdr_length = hdr_len;
- opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+ first_desc->hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+ QLCNIC_TX_TCP_LSO;
/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring */
copied = 0;
offset = 2;
- if (flags & FLAGS_VLAN_OOB) {
- first_desc->total_hdr_length += VLAN_HLEN;
+ if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+ first_desc->hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
- flags |= FLAGS_VLAN_TAGGED;
+ flags |= QLCNIC_FLAGS_VLAN_TAGGED;
/* Create a TSO vlan header template for firmware */
hwdesc = &tx_ring->desc_head[producer];
@@ -464,16 +563,16 @@ set_flags:
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCP_PKT;
+ opcode = QLCNIC_TX_TCP_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDP_PKT;
+ opcode = QLCNIC_TX_UDP_PKT;
} else if (protocol == ETH_P_IPV6) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCPV6_PKT;
+ opcode = QLCNIC_TX_TCPV6_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDPV6_PKT;
+ opcode = QLCNIC_TX_UDPV6_PKT;
}
}
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct ethhdr *phdr;
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
+ u16 protocol;
+ bool l4_is_udp = false;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_tx_stop_all_queues(netdev);
@@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
- goto unwind_buff;
+ protocol = ntohs(skb->protocol);
+ if (protocol == ETH_P_IP)
+ l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+ else if (protocol == ETH_P_IPV6)
+ l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+ /* Check if it is a VXLAN packet */
+ if (!skb->encapsulation || !l4_is_udp ||
+ !qlcnic_encap_tx_offload(adapter)) {
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+ tx_ring)))
+ goto unwind_buff;
+ } else {
+ if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+ skb, tx_ring)))
+ goto unwind_buff;
+ }
if (adapter->drv_mac_learn)
qlcnic_send_filter(adapter, first_desc, skb);
@@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}
+#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+ return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_host_sds_ring *sds_ring,
@@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
+ if (qlcnic_encap_length(sts_data[1]) &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->encapsulation = 1;
+ adapter->stats.encap_rx_csummed++;
+ }
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1222865cfb73..309d05640883 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -21,6 +21,9 @@
#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
+#ifdef CONFIG_QLCNIC_VXLAN
+#include <net/vxlan.h>
+#endif
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -90,7 +93,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
pci_channel_state_t);
-
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -462,6 +464,37 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_QLCNIC_VXLAN
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	/* Adapter supports only one VXLAN port. Use the very first port
+ * for enabling offload
+ */
+ if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ return;
+
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ (ahw->vxlan_port != ntohs(port)))
+ return;
+
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+#endif
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -480,6 +513,10 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
+#ifdef CONFIG_QLCNIC_VXLAN
+ .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
+ .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -561,6 +598,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.disable_sds_intr = qlcnic_82xx_disable_sds_intr,
.enable_tx_intr = qlcnic_82xx_enable_tx_intr,
.disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +727,7 @@ restore:
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1, vector;
+ int err, vector;
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +744,17 @@ enable_msix:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return err;
+ return 0;
} else if (err > 0) {
+ pci_disable_msix(pdev);
+
dev_info(&pdev->dev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -715,12 +762,12 @@ enable_msix:
if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
if (err < QLCNIC_82XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
} else {
num_msix = rounddown_pow_of_two(err - 1);
num_msix += 1;
if (err < QLCNIC_83XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
}
if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +794,7 @@ enable_msix:
}
}
- return err;
+ return -EIO;
}
static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
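/* Editorial sketch, not part of the patch: the qlcnic hunk above switches
 * from pci_enable_msix() to pci_enable_msix_range().  The helper below is
 * a minimal illustration of the new call's contract; "foo_enable_msix" and
 * "nvec" are hypothetical names, not driver symbols.
 */
#include <linux/pci.h>

static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec)
{
	int got;

	/* Ask for anywhere between 1 and nvec vectors.  A negative return
	 * means nothing was enabled; a positive return is the number of
	 * vectors MSI-X is now enabled with (possibly fewer than nvec).
	 */
	got = pci_enable_msix_range(pdev, entries, 1, nvec);
	if (got < 0)
		return got;	/* caller may fall back to MSI or INTx */

	return got;
}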
@@ -1934,6 +1981,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (qlcnic_encap_rx_offload(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
@@ -2196,6 +2248,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO;
+ if (qlcnic_encap_tx_offload(adapter)) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ /* encapsulation Tx offload supported by Adapter */
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ }
+
+ if (qlcnic_encap_rx_offload(adapter))
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
netdev->hw_features = netdev->features;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
@@ -2442,8 +2507,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
- dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
- dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
goto err_out_free_hw;
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962e2ec4..37b979b1266b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_RDEND = 255
};
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
struct qlcnic_dump_operations {
enum qlcnic_minidump_opcode opcode;
u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
int i, k, timeout = 0;
- u32 addr, data;
+ u32 addr, data, temp;
u8 no_ops;
- struct __ctrl *ctr = &entry->region.ctrl;
- struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
addr = ctr->addr;
no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
}
break;
case QLCNIC_DUMP_RD_SAVE:
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
data = qlcnic_ind_rd(adapter, addr);
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
case QLCNIC_DUMP_WRT_SAVED:
- if (ctr->index_v)
- data = t_hdr->saved_state[ctr->index_v];
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
else
data = ctr->val1;
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
- data = t_hdr->saved_state[ctr->index_v];
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
data <<= ctr->shl_val;
data >>= ctr->shr_val;
if (ctr->val2)
data &= ctr->val2;
data |= ctr->val3;
data += ctr->val1;
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
default:
dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_dump_template_hdr tmp_hdr;
- u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
int ret = 0;
if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
return err;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
- int err;
- u32 temp_size = 0;
- u32 version, csum, *tmp_buf;
struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ int err;
ahw = adapter->ahw;
-
+ fw_dump = &ahw->fw_dump;
err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
&use_flash_temp);
if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
return -EIO;
}
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr)
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
return -ENOMEM;
- tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
if (use_flash_temp)
goto flash_temp;
@@ -1065,8 +1181,8 @@ flash_temp:
dev_err(&adapter->pdev->dev,
"Failed to get minidump template header %d\n",
err);
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
}
@@ -1076,21 +1192,22 @@ flash_temp:
if (csum) {
dev_err(&adapter->pdev->dev,
"Template header checksum validation failed\n");
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
- tmpl_hdr->cap_mask);
+ fw_dump->cap_mask);
- if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
- ahw->fw_dump.use_pex_dma = true;
+ if (qlcnic_83xx_check(adapter) &&
+ (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
+ fw_dump->use_pex_dma = true;
else
- ahw->fw_dump.use_pex_dma = false;
+ fw_dump->use_pex_dma = false;
qlcnic_enable_fw_dump_state(adapter);
@@ -1099,21 +1216,22 @@ flash_temp:
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
- __le32 *buffer;
- u32 ocm_window;
- char mesg[64];
- char *msg[] = {mesg, NULL};
- int i, k, ops_cnt, ops_index, dump_size = 0;
- u32 entry_offset, dump, no_entries, buf_offset = 0;
- struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
- void *temp_buffer;
+ struct qlcnic_dump_entry *entry;
+ void *temp_buffer, *tmpl_hdr;
+ u32 ocm_window;
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
/* Return if we don't have firmware dump template header */
if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
- if (i & tmpl_hdr->drv_cap_mask)
- dump_size += tmpl_hdr->cap_sizes[k];
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
if (!dump_size)
return -EIO;
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buffer = fw_dump->data;
fw_dump->size = dump_size;
- no_entries = tmpl_hdr->num_entries;
- entry_offset = tmpl_hdr->offset;
- tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
- tmpl_hdr->sys_info[1] = adapter->fw_version;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
if (fw_dump->use_pex_dma) {
temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
} else {
+ hdr_83xx = tmpl_hdr;
ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
fw_dump_ops = qlcnic_83xx_fw_dump_ops;
- ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
- tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
- tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
}
for (i = 0; i < no_entries; i++) {
- entry = (void *)tmpl_hdr + entry_offset;
- if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
- dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
- adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, template header size %d bytes\n",
+ fw_dump->size, fw_dump->tmpl_hdr_size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e5277a632671..14f748cbf0de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -15,6 +15,7 @@
#define QLC_MAC_OPCODE_MASK 0x7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
cmd.req.arg[1] = 0x4;
- if (enable)
+ if (enable) {
cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ }
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 3d64113a35af..448d156c3d08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 count = 0;
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return ahw->total_nic_func;
-
- if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
- count = QLC_DEFAULT_VNIC_COUNT;
- else
- count = ahw->max_vnic_func;
-
- return count;
-}
-
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < pci_func_count; i++) {
+ for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
-
- return -1;
+ return -EINVAL;
}
static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pm_func_cfg *pm_cfg;
- int i, pm_cfg_size;
u8 pci_func;
+ u32 count;
+ int i;
- pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
- if (size != pm_cfg_size)
- return QL_STATUS_INVALID_PARAM;
-
- memset(buf, 0, pm_cfg_size);
+ memset(buf, 0, size);
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_hardware_context *ahw = adapter->ahw;
int i, ret;
u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= pci_func_count)
+ if (pci_func >= ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_func_cfg *esw_cfg;
- size_t esw_cfg_size;
- u8 i, pci_func;
-
- esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
- if (size != esw_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u8 pci_func;
+ u32 count;
+ int i;
- memset(buf, 0, esw_cfg_size);
+ memset(buf, 0, size);
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- size_t np_cfg_size;
int i, ret;
-
- np_cfg_size = pci_func_count * sizeof(*np_cfg);
- if (size != np_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u32 count;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(buf, 0, np_cfg_size);
+ memset(buf, 0, size);
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
- size_t pci_cfg_sz;
int i, ret;
+ u32 count;
- pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
- if (size != pci_cfg_sz)
- return QL_STATUS_INVALID_PARAM;
-
- pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
}
pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].func_state = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 656c65ddadb4..0a1d76acab81 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
if (skb_is_gso(skb)) {
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
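/* Editorial sketch, not part of the patch: skb_cow_head() replaces the
 * open-coded skb_header_cloned()/pskb_expand_head() sequence above.  It
 * only copies the header portion when it is shared, so the TSO path can
 * then modify the headers in place.  "foo_prep_tso" is a hypothetical
 * helper used only to show the pattern.
 */
#include <linux/skbuff.h>

static int foo_prep_tso(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);	/* make sure the headers are writable */
	if (err < 0)
		return err;

	/* ... safe to edit IP/TCP headers and build the TSO descriptor ... */
	return 0;
}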
@@ -3331,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
- /* Loop to get our vectors. We start with
- * what we want and settle for what we get.
- */
- do {
- err = pci_enable_msix(qdev->pdev,
- qdev->msi_x_entry, qdev->intr_count);
- if (err > 0)
- qdev->intr_count = err;
- } while (err > 0);
-
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
- qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
- } else if (err == 0) {
+ } else {
+ qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 819b74cefd64..cd045ecb9816 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -270,11 +270,6 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
-static int r6040_mdiobus_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static void r6040_free_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
@@ -1191,7 +1186,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lp->mii_bus->priv = dev;
lp->mii_bus->read = r6040_mdiobus_read;
lp->mii_bus->write = r6040_mdiobus_write;
- lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 737c1a881f78..2bc728e65e24 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -476,7 +476,7 @@ rx_status_loop:
rx = 0;
cpw16(IntrStatus, cp_rx_intr_mask);
- while (1) {
+ while (rx < budget) {
u32 status, len;
dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
@@ -554,9 +554,6 @@ rx_next:
else
desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
rx_tail = NEXT_RX(rx_tail);
-
- if (rx >= budget)
- break;
}
cp->rx_tail = rx_tail;
@@ -899,7 +896,7 @@ out_unlock:
return NETDEV_TX_OK;
out_dma_error:
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
cp->dev->stats.tx_dropped++;
goto out_unlock;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da5972eefdd2..2e5df148af4c 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
if (len < ETH_ZLEN)
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
} else {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
return stats;
}
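/* Editorial sketch, not part of the patch: the _bh -> _irq conversion above
 * keeps the usual u64_stats retry loop unchanged apart from the fetch
 * helpers.  "struct foo_stats" and "foo_fetch_stats" are hypothetical names
 * used only to show the pattern.
 */
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static void foo_fetch_stats(const struct foo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}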
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3ff7bc3e7a23..aa1c079f231d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
tp->TxDescArray + entry);
if (skb) {
tp->dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
tx_skb->skb = NULL;
}
}
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
tp->tx_stats.packets++;
tp->tx_stats.bytes += tx_skb->skb->len;
u64_stats_update_end(&tp->tx_stats.syncp);
- dev_kfree_skb(tx_skb->skb);
+ dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
rtl8169_rx_missed(dev, ioaddr);
do {
- start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
stats->rx_packets = tp->rx_stats.packets;
stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94e8219..6a9509ccd33b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,9 @@
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2008-2014 Renesas Solutions Corp.
+ * Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ * Copyright (C) 2014 Codethink Limited
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +28,10 @@
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
@@ -36,6 +41,7 @@
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
+#include <linux/of_mdio.h>
#include "sh_eth.h"
@@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev)
value = 0x0;
break;
default:
- pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ netdev_warn(ndev,
+ "PHY interface mode was not setup. Set to MII.\n");
value = 0x1;
break;
}
@@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
cnt--;
}
if (cnt <= 0) {
- pr_err("Device reset failed\n");
+ netdev_err(ndev, "Device reset failed\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev)
ret = sh_eth_check_reset(ndev);
if (ret)
- goto out;
+ return ret;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev)
EDMR);
}
-out:
return ret;
}
@@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Soft Reset */
ret = sh_eth_reset(ndev);
if (ret)
- goto out;
+ return ret;
if (mdp->cd->rmiimode)
sh_eth_write(ndev, 0x1, RMIIMODE);
@@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
netif_start_queue(ndev);
}
-out:
return ret;
}
@@ -1550,8 +1555,7 @@ ignore_link:
/* Unused write back interrupt */
if (intr_status & EESR_TABT) { /* Transmit Abort int */
ndev->stats.tx_aborted_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit Abort\n");
+ netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
}
}
@@ -1560,45 +1564,38 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive Abort\n");
+ netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
if (intr_status & EESR_TDE) {
/* Transmit Descriptor Empty int */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
}
if (intr_status & EESR_TFE) {
/* FIFO under flow */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
+ netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
}
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
-
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- if (netif_msg_rx_err(mdp))
- dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
/* Address Error */
ndev->stats.tx_fifo_errors++;
- if (netif_msg_tx_err(mdp))
- dev_err(&ndev->dev, "Address Error\n");
+ netif_err(mdp, tx_err, ndev, "Address Error\n");
}
mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1609,9 +1606,9 @@ ignore_link:
u32 edtrr = sh_eth_read(ndev, EDTRR);
/* dmesg */
- dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
- intr_status, mdp->cur_tx, mdp->dirty_tx,
- (u32)ndev->state, edtrr);
+ netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+ intr_status, mdp->cur_tx, mdp->dirty_tx,
+ (u32)ndev->state, edtrr);
/* dirty buffer free */
sh_eth_txfree(ndev);
@@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
EESIPR);
__napi_schedule(&mdp->napi);
} else {
- dev_warn(&ndev->dev,
- "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
- intr_status, intr_enable);
+ netdev_warn(ndev,
+ "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+ intr_status, intr_enable);
}
}
@@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev)
/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
+ struct device_node *np = ndev->dev.parent->of_node;
struct sh_eth_private *mdp = netdev_priv(ndev);
- char phy_id[MII_BUS_ID_SIZE + 3];
struct phy_device *phydev = NULL;
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- mdp->mii_bus->id, mdp->phy_id);
-
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
/* Try connect to PHY */
- phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- mdp->phy_interface);
+ if (np) {
+ struct device_node *pn;
+
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn,
+ sh_eth_adjust_link, 0,
+ mdp->phy_interface);
+
+ if (!phydev)
+ phydev = ERR_PTR(-ENOENT);
+ } else {
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ mdp->mii_bus->id, mdp->phy_id);
+
+ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
+ mdp->phy_interface);
+ }
+
if (IS_ERR(phydev)) {
- dev_err(&ndev->dev, "phy_connect failed\n");
+ netdev_err(ndev, "failed to connect PHY\n");
return PTR_ERR(phydev);
}
- dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
- phydev->addr, phydev->irq, phydev->drv->name);
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
mdp->phydev = phydev;
@@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
ret = sh_eth_ring_init(ndev);
if (ret < 0) {
- dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
return ret;
}
ret = sh_eth_dev_init(ndev, false);
if (ret < 0) {
- dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
return ret;
}
@@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev)
ret = request_irq(ndev->irq, sh_eth_interrupt,
mdp->cd->irq_flags, ndev->name, ndev);
if (ret) {
- dev_err(&ndev->dev, "Can not assign IRQ number\n");
+ netdev_err(ndev, "Can not assign IRQ number\n");
goto out_napi_off;
}
@@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
netif_stop_queue(ndev);
- if (netif_msg_timer(mdp)) {
- dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
- ndev->name, (int)sh_eth_read(ndev, EESR));
- }
+ netif_err(mdp, timer, ndev,
+ "transmit timed out, status %8.8x, resetting...\n",
+ (int)sh_eth_read(ndev, EESR));
/* tx_errors count up */
ndev->stats.tx_errors++;
@@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
- if (netif_msg_tx_queued(mdp))
- dev_warn(&ndev->dev, "TxFD exhausted.\n");
+ netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return NETDEV_TX_BUSY;
@@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETHERSMALL)
- txdesc->buffer_length = ETHERSMALL;
+ if (skb->len < ETH_ZLEN)
+ txdesc->buffer_length = ETH_ZLEN;
else
txdesc->buffer_length = skb->len;
@@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
udelay(10);
timeout--;
if (timeout <= 0) {
- dev_err(&ndev->dev, "%s: timeout\n", __func__);
+ netdev_err(ndev, "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
}
@@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
}
/* MDIO bus release function */
-static int sh_mdio_release(struct net_device *ndev)
+static int sh_mdio_release(struct sh_eth_private *mdp)
{
- struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
-
/* unregister mdio bus */
- mdiobus_unregister(bus);
-
- /* remove mdio bus info from net_device */
- dev_set_drvdata(&ndev->dev, NULL);
+ mdiobus_unregister(mdp->mii_bus);
/* free bitbang info */
- free_mdio_bitbang(bus);
+ free_mdio_bitbang(mdp->mii_bus);
return 0;
}
/* MDIO bus init function */
-static int sh_mdio_init(struct net_device *ndev, int id,
+static int sh_mdio_init(struct sh_eth_private *mdp,
struct sh_eth_plat_data *pd)
{
int ret, i;
struct bb_info *bitbang;
- struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct platform_device *pdev = mdp->pdev;
+ struct device *dev = &mdp->pdev->dev;
/* create bit control struct for PHY */
- bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
- GFP_KERNEL);
- if (!bitbang) {
- ret = -ENOMEM;
- goto out;
- }
+ bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang)
+ return -ENOMEM;
/* bitbang init */
bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
@@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* MII controller setting */
mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
- if (!mdp->mii_bus) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mdp->mii_bus)
+ return -ENOMEM;
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
- mdp->mii_bus->parent = &ndev->dev;
+ mdp->mii_bus->parent = dev;
snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- mdp->pdev->name, id);
+ pdev->name, pdev->id);
/* PHY IRQ */
- mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
- sizeof(int) * PHY_MAX_ADDR,
+ mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- mdp->mii_bus->irq[i] = PHY_POLL;
- if (pd->phy_irq > 0)
- mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+ /* register MDIO bus */
+ if (dev->of_node) {
+ ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mdp->mii_bus->irq[i] = PHY_POLL;
+ if (pd->phy_irq > 0)
+ mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+
+ ret = mdiobus_register(mdp->mii_bus);
+ }
- /* register mdio bus */
- ret = mdiobus_register(mdp->mii_bus);
if (ret)
goto out_free_bus;
- dev_set_drvdata(&ndev->dev, mdp->mii_bus);
-
return 0;
out_free_bus:
free_mdio_bitbang(mdp->mii_bus);
-
-out:
return ret;
}
@@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
default:
- pr_err("Unknown register type (%d)\n", register_type);
break;
}
@@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = eth_change_mtu,
};
+#ifdef CONFIG_OF
+static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct sh_eth_plat_data *pdata;
+ const char *mac_addr;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->phy_interface = of_get_phy_mode(np);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+ pdata->no_ether_link =
+ of_property_read_bool(np, "renesas,no-ether-link");
+ pdata->ether_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ return pdata;
+}
+
+static const struct of_device_id sh_eth_match_table[] = {
+ { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_eth_match_table);
+#else
+static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sh_eth_drv_probe(struct platform_device *pdev)
{
int ret, devno = 0;
@@ -2723,15 +2765,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
- if (!ndev) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* The sh Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
@@ -2760,9 +2802,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
spin_lock_init(&mdp->lock);
mdp->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ if (pdev->dev.of_node)
+ pd = sh_eth_parse_dt(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "no platform data\n");
ret = -EINVAL;
@@ -2778,8 +2820,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ if (id) {
+ mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(sh_eth_match_table),
+ &pdev->dev);
+ mdp->cd = (struct sh_eth_cpu_data *)match->data;
+ }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
+ if (!mdp->reg_offset) {
+ dev_err(&pdev->dev, "Unknown register type (%d)\n",
+ mdp->cd->register_type);
+ ret = -EINVAL;
+ goto out_release;
+ }
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
@@ -2825,6 +2881,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
}
}
+ /* MDIO bus init */
+ ret = sh_mdio_init(mdp, pd);
+ if (ret) {
+ dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ goto out_release;
+ }
+
netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
/* network device register */
@@ -2832,31 +2895,26 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_napi_del;
- /* mdio bus init */
- ret = sh_mdio_init(ndev, pdev->id, pd);
- if (ret)
- goto out_unregister;
-
/* print device information */
- pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
- (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+ pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, ndev);
return ret;
-out_unregister:
- unregister_netdev(ndev);
-
out_napi_del:
netif_napi_del(&mdp->napi);
+ sh_mdio_release(mdp);
out_release:
/* net_dev free */
if (ndev)
free_netdev(ndev);
-out:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -2865,9 +2923,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
- sh_mdio_release(ndev);
unregister_netdev(ndev);
netif_napi_del(&mdp->napi);
+ sh_mdio_release(mdp);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
@@ -2920,6 +2978,7 @@ static struct platform_driver sh_eth_driver = {
.driver = {
.name = CARDNAME,
.pm = SH_ETH_PM_OPS,
+ .of_match_table = of_match_ptr(sh_eth_match_table),
},
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915b88ec..d55e37cd5fec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
#define RX_RING_MIN 64
#define TX_RING_MAX 1024
#define RX_RING_MAX 1024
-#define ETHERSMALL 60
-#define PKT_BUF_SZ 1538
+#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
#define SH_ETH_TSU_CAM_ENTRIES 32
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644
index 000000000000..7902341f2623
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -0,0 +1,16 @@
+#
+# Samsung Ethernet device configuration
+#
+
+config NET_VENDOR_SAMSUNG
+ bool "Samsung Ethernet device"
+ default y
+ ---help---
+ This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
+ platforms.
+
+if NET_VENDOR_SAMSUNG
+
+source "drivers/net/ethernet/samsung/sxgbe/Kconfig"
+
+endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644
index 000000000000..1773c29b8d76
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Samsung Ethernet device drivers.
+#
+
+obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig
new file mode 100644
index 000000000000..d79288c51d0a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Kconfig
@@ -0,0 +1,9 @@
+config SXGBE_ETH
+ tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
+ depends on HAS_IOMEM && HAS_DMA
+ select PHYLIB
+ select CRC32
+ select PTP_1588_CLOCK
+ ---help---
+ This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
+ platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644
index 000000000000..dcc80b9d4370
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
+samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
+ sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
+ sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644
index 000000000000..6203c7d8550f
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -0,0 +1,535 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SXGBE_COMMON_H__
+#define __SXGBE_COMMON_H__
+
+/* forward references */
+struct sxgbe_desc_ops;
+struct sxgbe_dma_ops;
+struct sxgbe_mtl_ops;
+
+#define SXGBE_RESOURCE_NAME "sam_sxgbeeth"
+#define DRV_MODULE_VERSION "November_2013"
+
+/* MAX HW feature words */
+#define SXGBE_HW_WORDS 3
+
+#define SXGBE_RX_COE_NONE 0
+
+/* CSR Frequency Access Defines*/
+#define SXGBE_CSR_F_150M 150000000
+#define SXGBE_CSR_F_250M 250000000
+#define SXGBE_CSR_F_300M 300000000
+#define SXGBE_CSR_F_350M 350000000
+#define SXGBE_CSR_F_400M 400000000
+#define SXGBE_CSR_F_500M 500000000
+
+/* pause time */
+#define SXGBE_PAUSE_TIME 0x200
+
+/* tx queues */
+#define SXGBE_TX_QUEUES 8
+#define SXGBE_RX_QUEUES 16
+
+/* Calculated based on how much time it takes to fill 256KB of Rx memory
+ * at 10Gb speed with a 156MHz clock rate, and set to a little less than
+ * the actual value.
+ */
+#define SXGBE_MAX_DMA_RIWT 0x70
+#define SXGBE_MIN_DMA_RIWT 0x01
+
+/* Tx coalesce parameters */
+#define SXGBE_COAL_TX_TIMER 40000
+#define SXGBE_MAX_COAL_TX_TICK 100000
+#define SXGBE_TX_MAX_FRAMES 512
+#define SXGBE_TX_FRAMES 128
+
+/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+#define SXGBE_DEFAULT_LIT_LS 0x3E8
+#define SXGBE_DEFAULT_TWT_LS 0x0
+
+/* Flow Control defines */
+#define SXGBE_FLOW_OFF 0
+#define SXGBE_FLOW_RX 1
+#define SXGBE_FLOW_TX 2
+#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* errors */
+#define RX_GMII_ERR 0x01
+#define RX_WATCHDOG_ERR 0x02
+#define RX_CRC_ERR 0x03
+#define RX_GAINT_ERR 0x04
+#define RX_IP_HDR_ERR 0x05
+#define RX_PAYLOAD_ERR 0x06
+#define RX_OVERFLOW_ERR 0x07
+
+/* pkt type */
+#define RX_LEN_PKT 0x00
+#define RX_MACCTL_PKT 0x01
+#define RX_DCBCTL_PKT 0x02
+#define RX_ARP_PKT 0x03
+#define RX_OAM_PKT 0x04
+#define RX_UNTAG_PKT 0x05
+#define RX_OTHER_PKT 0x07
+#define RX_SVLAN_PKT 0x08
+#define RX_CVLAN_PKT 0x09
+#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A
+#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B
+#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C
+#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D
+
+#define RX_NOT_IP_PKT 0x00
+#define RX_IPV4_TCP_PKT 0x01
+#define RX_IPV4_UDP_PKT 0x02
+#define RX_IPV4_ICMP_PKT 0x03
+#define RX_IPV4_UNKNOWN_PKT 0x07
+#define RX_IPV6_TCP_PKT 0x09
+#define RX_IPV6_UDP_PKT 0x0A
+#define RX_IPV6_ICMP_PKT 0x0B
+#define RX_IPV6_UNKNOWN_PKT 0x0F
+
+#define RX_NO_PTP 0x00
+#define RX_PTP_SYNC 0x01
+#define RX_PTP_FOLLOW_UP 0x02
+#define RX_PTP_DELAY_REQ 0x03
+#define RX_PTP_DELAY_RESP 0x04
+#define RX_PTP_PDELAY_REQ 0x05
+#define RX_PTP_PDELAY_RESP 0x06
+#define RX_PTP_PDELAY_FOLLOW_UP 0x07
+#define RX_PTP_ANNOUNCE 0x08
+#define RX_PTP_MGMT 0x09
+#define RX_PTP_SIGNAL 0x0A
+#define RX_PTP_RESV_MSG 0x0F
+
+/* EEE-LPI mode flags*/
+#define TX_ENTRY_LPI_MODE 0x10
+#define TX_EXIT_LPI_MODE 0x20
+#define RX_ENTRY_LPI_MODE 0x40
+#define RX_EXIT_LPI_MODE 0x80
+
+/* EEE-LPI Interrupt status flag */
+#define LPI_INT_STATUS BIT(5)
+
+/* EEE-LPI Default timer values */
+#define LPI_LINK_STATUS_TIMER 0x3E8
+#define LPI_MAC_WAIT_TIMER 0x00
+
+/* EEE-LPI Control and status definitions */
+#define LPI_CTRL_STATUS_TXA BIT(19)
+#define LPI_CTRL_STATUS_PLSDIS BIT(18)
+#define LPI_CTRL_STATUS_PLS BIT(17)
+#define LPI_CTRL_STATUS_LPIEN BIT(16)
+#define LPI_CTRL_STATUS_TXRSTP BIT(11)
+#define LPI_CTRL_STATUS_RXRSTP BIT(10)
+#define LPI_CTRL_STATUS_RLPIST BIT(9)
+#define LPI_CTRL_STATUS_TLPIST BIT(8)
+#define LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define LPI_CTRL_STATUS_TLPIEN BIT(0)
+
+enum dma_irq_status {
+ tx_hard_error = BIT(0),
+ tx_bump_tc = BIT(1),
+ handle_tx = BIT(2),
+ rx_hard_error = BIT(3),
+ rx_bump_tc = BIT(4),
+ handle_rx = BIT(5),
+};
+
+#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/* MMC control defines */
+#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* SXGBE HW ADDR regs */
+#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
+ (reg * 8))
+#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
+ (reg * 8))
+#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
+#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */
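The two SXGBE_ADDR_* macros above split the 32 perfect-filter entries across two register banks: indices 0-15 use the block starting at 0x40, while indices above 15 switch to the 0x800-based block but keep the absolute index*8 stride (so entry 16 lands at 0x880/0x884). A small host-side check of the arithmetic, purely illustrative (the constants are copied from the defines above):

#include <stdio.h>

#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + (reg * 8))
#define SXGBE_ADDR_LOW(reg)  (((reg > 15) ? 0x00000804 : 0x00000044) + (reg * 8))

int main(void)
{
	/* entry 0 -> 0x040/0x044, entry 16 -> 0x880/0x884 */
	for (int reg = 0; reg < 32; reg++)
		printf("entry %2d: high 0x%03x, low 0x%03x\n",
		       reg, SXGBE_ADDR_HIGH(reg), SXGBE_ADDR_LOW(reg));
	return 0;
}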
+
+/* SXGBE Frame Filter defines */
+#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* SA Inverse Filtering */
+#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+#define SXGBE_HASH_TABLE_SIZE 64
+#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+
+#define SXGBE_HI_REG_AE 0x80000000
+
+/* Minimum and maximum MTU */
+#define MIN_MTU 68
+#define MAX_MTU 9000
+
+#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
+ for (queue_num = 0; queue_num < max_queues; queue_num++)
+
+#define DRV_VERSION "1.0.0"
+
+#define SXGBE_MAX_RX_CHANNELS 16
+#define SXGBE_MAX_TX_CHANNELS 16
+
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0DFC
+#define START_MTL_REG_OFFSET 0x1000
+#define MAX_MTL_REG_OFFSET 0x18FC
+#define START_DMA_REG_OFFSET 0x3000
+#define MAX_DMA_REG_OFFSET 0x38FC
+
+#define REG_SPACE_SIZE 0x2000
+
+/* sxgbe statistics counters */
+struct sxgbe_extra_stats {
+ /* TX/RX IRQ events */
+ unsigned long tx_underflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_ctxt_desc_err;
+ unsigned long tx_threshold;
+ unsigned long rx_threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long normal_irq_n;
+ unsigned long tx_normal_irq_n;
+ unsigned long rx_normal_irq_n;
+ unsigned long napi_poll;
+ unsigned long tx_clean;
+ unsigned long tx_reset_ic_bit;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_underflow_irq;
+
+ /* Bus access errors */
+ unsigned long fatal_bus_error_irq;
+ unsigned long tx_read_transfer_err;
+ unsigned long tx_write_transfer_err;
+ unsigned long tx_desc_access_err;
+ unsigned long tx_buffer_access_err;
+ unsigned long tx_data_transfer_err;
+ unsigned long rx_read_transfer_err;
+ unsigned long rx_write_transfer_err;
+ unsigned long rx_desc_access_err;
+ unsigned long rx_buffer_access_err;
+ unsigned long rx_data_transfer_err;
+
+ /* EEE-LPI stats */
+ unsigned long tx_lpi_entry_n;
+ unsigned long tx_lpi_exit_n;
+ unsigned long rx_lpi_entry_n;
+ unsigned long rx_lpi_exit_n;
+ unsigned long eee_wakeup_error_n;
+
+ /* RX specific */
+ /* L2 error */
+ unsigned long rx_code_gmii_err;
+ unsigned long rx_watchdog_err;
+ unsigned long rx_crc_err;
+ unsigned long rx_gaint_pkt_err;
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long overflow_error;
+
+ /* L2 Pkt type */
+ unsigned long len_pkt;
+ unsigned long mac_ctl_pkt;
+ unsigned long dcb_ctl_pkt;
+ unsigned long arp_pkt;
+ unsigned long oam_pkt;
+ unsigned long untag_okt;
+ unsigned long other_pkt;
+ unsigned long svlan_tag_pkt;
+ unsigned long cvlan_tag_pkt;
+ unsigned long dvlan_ocvlan_icvlan_pkt;
+ unsigned long dvlan_osvlan_isvlan_pkt;
+ unsigned long dvlan_osvlan_icvlan_pkt;
+ unsigned long dvan_ocvlan_icvlan_pkt;
+
+ /* L3/L4 Pkt type */
+ unsigned long not_ip_pkt;
+ unsigned long ip4_tcp_pkt;
+ unsigned long ip4_udp_pkt;
+ unsigned long ip4_icmp_pkt;
+ unsigned long ip4_unknown_pkt;
+ unsigned long ip6_tcp_pkt;
+ unsigned long ip6_udp_pkt;
+ unsigned long ip6_icmp_pkt;
+ unsigned long ip6_unknown_pkt;
+
+ /* Filter specific */
+ unsigned long vlan_filter_match;
+ unsigned long sa_filter_fail;
+ unsigned long da_filter_fail;
+ unsigned long hash_filter_pass;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+
+ /* RX context specific */
+ unsigned long timestamp_dropped;
+ unsigned long rx_msg_type_no_ptp;
+ unsigned long rx_ptp_type_sync;
+ unsigned long rx_ptp_type_follow_up;
+ unsigned long rx_ptp_type_delay_req;
+ unsigned long rx_ptp_type_delay_resp;
+ unsigned long rx_ptp_type_pdelay_req;
+ unsigned long rx_ptp_type_pdelay_resp;
+ unsigned long rx_ptp_type_pdelay_follow_up;
+ unsigned long rx_ptp_announce;
+ unsigned long rx_ptp_mgmt;
+ unsigned long rx_ptp_signal;
+ unsigned long rx_ptp_resv_msg_type;
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct sxgbe_core_ops {
+ /* MAC core initialization */
+ void (*core_init)(void __iomem *ioaddr);
+ /* Dump MAC registers */
+ void (*dump_regs)(void __iomem *ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*host_irq_status)(void __iomem *ioaddr,
+ struct sxgbe_extra_stats *x);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt)(void __iomem *ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*enable_rx)(void __iomem *ioaddr, bool enable);
+ void (*enable_tx)(void __iomem *ioaddr, bool enable);
+
+ /* controller version specific operations */
+ int (*get_controller_version)(void __iomem *ioaddr);
+
+ /* If supported then get the optional core features */
+ unsigned int (*get_hw_feature)(void __iomem *ioaddr,
+ unsigned char feature_index);
+ /* adjust SXGBE speed */
+ void (*set_speed)(void __iomem *ioaddr, unsigned char speed);
+
+ /* EEE-LPI specific operations */
+ void (*set_eee_mode)(void __iomem *ioaddr);
+ void (*reset_eee_mode)(void __iomem *ioaddr);
+ void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
+ const int tw);
+ void (*set_eee_pls)(void __iomem *ioaddr, const int link);
+
+ /* Enable disable checksum offload operations */
+ void (*enable_rx_csum)(void __iomem *ioaddr);
+ void (*disable_rx_csum)(void __iomem *ioaddr);
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
+
+struct sxgbe_ops {
+ const struct sxgbe_core_ops *mac;
+ const struct sxgbe_desc_ops *desc;
+ const struct sxgbe_dma_ops *dma;
+ const struct sxgbe_mtl_ops *mtl;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ unsigned int ctrl_uid;
+ unsigned int ctrl_id;
+};
+
+/* SXGBE private data structures */
+struct sxgbe_tx_queue {
+ unsigned int irq_no;
+ struct sxgbe_priv_data *priv_ptr;
+ struct sxgbe_tx_norm_desc *dma_tx;
+ dma_addr_t dma_tx_phy;
+ dma_addr_t *tx_skbuff_dma;
+ struct sk_buff **tx_skbuff;
+ struct timer_list txtimer;
+ spinlock_t tx_lock; /* lock for tx queues */
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+ int hwts_tx_en;
+ u16 prev_mss;
+ u8 queue_no;
+};
+
+struct sxgbe_rx_queue {
+ struct sxgbe_priv_data *priv_ptr;
+ struct sxgbe_rx_norm_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ unsigned int irq_no;
+ u32 rx_riwt;
+ dma_addr_t *rx_skbuff_dma;
+ dma_addr_t dma_rx_phy;
+ u8 queue_no;
+};
+
+/* SXGBE HW capabilities */
+struct sxgbe_hw_features {
+ /****** CAP [0] *******/
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ /* IEEE 1588-2008 */
+ unsigned int atime_stamp;
+
+ unsigned int eee;
+
+ unsigned int tx_csum_offload;
+ unsigned int rx_csum_offload;
+ unsigned int multi_macaddr;
+ unsigned int tstamp_srcselect;
+ unsigned int sa_vlan_insert;
+
+ /****** CAP [1] *******/
+ unsigned int rxfifo_size;
+ unsigned int txfifo_size;
+ unsigned int atstmap_hword;
+ unsigned int dcb_enable;
+ unsigned int splithead_enable;
+ unsigned int tcpseg_offload;
+ unsigned int debug_mem;
+ unsigned int rss_enable;
+ unsigned int hash_tsize;
+ unsigned int l3l4_filer_size;
+
+ /* These values are in bytes, as
+ * described in the HW features
+ * section of the SXGBE data book
+ */
+ unsigned int rx_mtl_qsize;
+ unsigned int tx_mtl_qsize;
+
+ /****** CAP [2] *******/
+ /* TX and RX number of channels */
+ unsigned int rx_mtl_queues;
+ unsigned int tx_mtl_queues;
+ unsigned int rx_dma_channels;
+ unsigned int tx_dma_channels;
+ unsigned int pps_output_count;
+ unsigned int aux_input_count;
+};
+
+struct sxgbe_priv_data {
+ /* DMA descriptors */
+ struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
+ struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
+ u8 cur_rx_qnum;
+
+ unsigned int dma_tx_size;
+ unsigned int dma_rx_size;
+ unsigned int dma_buf_sz;
+ u32 rx_riwt;
+
+ struct napi_struct napi;
+
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct device *device;
+ struct sxgbe_ops *hw; /* sxgbe specific ops */
+ int no_csum_insertion;
+ int irq;
+ int rxcsum_insertion;
+ spinlock_t stats_lock; /* lock for tx/rx statistics */
+
+ struct phy_device *phydev;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ struct mii_bus *mii;
+ int mii_irq[PHY_MAX_ADDR];
+ u8 rx_pause;
+ u8 tx_pause;
+
+ struct sxgbe_extra_stats xstats;
+ struct sxgbe_plat_data *plat;
+ struct sxgbe_hw_features hw_cap;
+
+ u32 msg_enable;
+
+ struct clk *sxgbe_clk;
+ int clk_csr;
+ unsigned int mode;
+ unsigned int default_addend;
+
+ /* advanced time stamp support */
+ u32 adv_ts;
+ int use_riwt;
+ struct ptp_clock *ptp_clock;
+
+ /* tc control */
+ int tx_tc;
+ int rx_tc;
+ /* EEE-LPI specific members */
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
+};
+
+/* Function prototypes */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+ struct sxgbe_plat_data *plat_dat,
+ void __iomem *addr);
+int sxgbe_drv_remove(struct net_device *ndev);
+void sxgbe_set_ethtool_ops(struct net_device *netdev);
+int sxgbe_mdio_unregister(struct net_device *ndev);
+int sxgbe_mdio_register(struct net_device *ndev);
+int sxgbe_register_platform(void);
+void sxgbe_unregister_platform(void);
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev);
+int sxgbe_resume(struct net_device *ndev);
+int sxgbe_freeze(struct net_device *ndev);
+int sxgbe_restore(struct net_device *ndev);
+#endif /* CONFIG_PM */
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
+#endif /* __SXGBE_COMMON_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644
index 000000000000..c4da7a2b002a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -0,0 +1,262 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+/* MAC core initialization */
+static void sxgbe_core_init(void __iomem *ioaddr)
+{
+ u32 regval;
+
+ /* TX configuration */
+ regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+ /* Other configurable parameters IFP, IPG, ISR and ISM
+ * need to be set if required
+ */
+ regval |= SXGBE_TX_JABBER_DISABLE;
+ writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+ /* RX configuration */
+ regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ /* Other configurable parameters CST, SPEN, USP, GPSLCE,
+ * WD, LM, S2KP, HDSMS, GPSL, ELEN and ARPEN need to be
+ * set if required
+ */
+ regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
+ writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+/* Dump MAC registers */
+static void sxgbe_core_dump_regs(void __iomem *ioaddr)
+{
+}
+
+static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
+{
+ int status = 0;
+ int lpi_status;
+
+ /* Reading this register shall clear all the LPI status bits */
+ lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
+ status |= TX_ENTRY_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
+ status |= TX_EXIT_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
+ status |= RX_ENTRY_LPI_MODE;
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
+ status |= RX_EXIT_LPI_MODE;
+
+ return status;
+}
+
+/* Handle extra events on specific interrupts hw dependent */
+static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
+ struct sxgbe_extra_stats *x)
+{
+ int irq_status, status = 0;
+
+ irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);
+
+ if (unlikely(irq_status & LPI_INT_STATUS))
+ status |= sxgbe_get_lpi_status(ioaddr, irq_status);
+
+ return status;
+}
+
+/* Set power management mode (e.g. magic frame) */
+static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+}
+
+/* Set/Get Unicast MAC addresses */
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ u32 high_word, low_word;
+
+ high_word = (addr[5] << 8) | (addr[4]);
+ low_word = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0]);
+ writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+ writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+}
+
+static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ u32 high_word, low_word;
+
+ high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+ low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+
+ /* extract and assign address */
+ addr[5] = (high_word & 0x0000FF00) >> 8;
+ addr[4] = (high_word & 0x000000FF);
+ addr[3] = (low_word & 0xFF000000) >> 24;
+ addr[2] = (low_word & 0x00FF0000) >> 16;
+ addr[1] = (low_word & 0x0000FF00) >> 8;
+ addr[0] = (low_word & 0x000000FF);
+}
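The packing used by the set/get helpers above stores the address bytes in ascending order across the low word and the low half of the high word. A quick host-side illustration (not driver code) of how a sample address 00:11:22:33:44:55 maps onto the two register words:

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int high = (addr[5] << 8) | addr[4];
	unsigned int low  = (addr[3] << 24) | (addr[2] << 16) |
			    (addr[1] << 8) | addr[0];

	/* prints: high 0x00005544, low 0x33221100 */
	printf("high 0x%08x, low 0x%08x\n", high, low);
	return 0;
}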
+
+static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
+{
+ u32 tx_config;
+
+ tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+ tx_config &= ~SXGBE_TX_ENABLE;
+
+ if (enable)
+ tx_config |= SXGBE_TX_ENABLE;
+ writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
+{
+ u32 rx_config;
+
+ rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ rx_config &= ~SXGBE_RX_ENABLE;
+
+ if (enable)
+ rx_config |= SXGBE_RX_ENABLE;
+ writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static int sxgbe_get_controller_version(void __iomem *ioaddr)
+{
+ return readl(ioaddr + SXGBE_CORE_VERSION_REG);
+}
+
+/* If supported then get the optional core features */
+static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
+ unsigned char feature_index)
+{
+ return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
+}
+
+static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
+{
+ u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+ /* clear the speed bits */
+ tx_cfg &= ~0x60000000;
+ tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
+
+ /* set the speed */
+ writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void sxgbe_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ /* Enable the LPI mode for transmit path with Tx automate bit set.
+ * When Tx Automate bit is set, MAC internally handles the entry
+ * to LPI mode after all outstanding and pending packets are
+ * transmitted.
+ */
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+ ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+ ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+ /* If the PHY link status is UP then set PLS */
+ if (link)
+ ctrl |= LPI_CTRL_STATUS_PLS;
+ else
+ ctrl &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void sxgbe_set_eee_timer(void __iomem *ioaddr,
+ const int ls, const int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
+}
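With the default timers defined in sxgbe_common.h (LPI_LINK_STATUS_TIMER = 0x3E8, LPI_MAC_WAIT_TIMER = 0x00), the packed value written here would be 0x03E80000: TW occupies bits [15:0] and LS bits [26:16]. A trivial host-side check of the packing:

#include <stdio.h>

int main(void)
{
	int ls = 0x3E8;	/* LPI_LINK_STATUS_TIMER */
	int tw = 0x00;	/* LPI_MAC_WAIT_TIMER */
	int value = (tw & 0xffff) | ((ls & 0x7ff) << 16);

	printf("LPI timer ctrl value: 0x%08x\n", value);	/* 0x03e80000 */
	return 0;
}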
+
+static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+ ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static const struct sxgbe_core_ops core_ops = {
+ .core_init = sxgbe_core_init,
+ .dump_regs = sxgbe_core_dump_regs,
+ .host_irq_status = sxgbe_core_host_irq_status,
+ .pmt = sxgbe_core_pmt,
+ .set_umac_addr = sxgbe_core_set_umac_addr,
+ .get_umac_addr = sxgbe_core_get_umac_addr,
+ .enable_rx = sxgbe_enable_rx,
+ .enable_tx = sxgbe_enable_tx,
+ .get_controller_version = sxgbe_get_controller_version,
+ .get_hw_feature = sxgbe_get_hw_feature,
+ .set_speed = sxgbe_core_set_speed,
+ .set_eee_mode = sxgbe_set_eee_mode,
+ .reset_eee_mode = sxgbe_reset_eee_mode,
+ .set_eee_timer = sxgbe_set_eee_timer,
+ .set_eee_pls = sxgbe_set_eee_pls,
+ .enable_rx_csum = sxgbe_enable_rx_csum,
+ .disable_rx_csum = sxgbe_disable_rx_csum,
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
+{
+ return &core_ops;
+}
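A minimal sketch of how sxgbe_get_core_ops() is expected to be consumed by the main driver; the wiring mirrors struct sxgbe_ops in sxgbe_common.h, but the real call sites live in sxgbe_main.c, outside this hunk, so treat the fragment as illustrative only:

	/* illustrative fragment -- error handling omitted; "priv" and "dev"
	 * stand for the driver's private data and its net_device
	 */
	const struct sxgbe_core_ops *mac = sxgbe_get_core_ops();

	mac->core_init(priv->ioaddr);
	mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
	mac->enable_tx(priv->ioaddr, true);
	mac->enable_rx(priv->ioaddr, true);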
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644
index 000000000000..e896dbbd2e15
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -0,0 +1,515 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_desc.h"
+
+/* DMA TX descriptor ring initialization */
+static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.own_bit = 0;
+}
+
+static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+ u32 total_hdr_len, u32 tcp_hdr_len,
+ u32 tcp_payload_len)
+{
+ p->tdes23.tx_rd_des23.tse_bit = is_tse;
+ p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
+ p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
+ p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len;
+}
+
+/* Assign buffer lengths for descriptor */
+static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+ int buf1_len, int pkt_len, int cksum)
+{
+ p->tdes23.tx_rd_des23.first_desc = is_fd;
+ p->tdes23.tx_rd_des23.buf1_size = buf1_len;
+
+ p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+
+ if (cksum)
+ p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+}
+
+/* Set VLAN control information */
+static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
+{
+ p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
+}
+
+/* Set the owner of Normal descriptor */
+static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.own_bit = 1;
+}
+
+/* Get the owner of Normal descriptor */
+static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.own_bit;
+}
+
+/* Invoked by the xmit function to close the tx descriptor */
+static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.last_desc = 1;
+ p->tdes23.tx_rd_des23.int_on_com = 1;
+}
+
+/* Clean the tx descriptor as soon as the tx irq is received */
+static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+ memset(p, 0, sizeof(*p));
+}
+
+/* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted
+ */
+static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.int_on_com = 0;
+}
+
+/* Last tx segment reports the transmit status */
+static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.last_desc;
+}
+
+/* Get the buffer size from the descriptor */
+static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.buf1_size;
+}
+
+/* Set tx timestamp enable bit */
+static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
+{
+ p->tdes23.tx_rd_des23.timestmp_enable = 1;
+}
+
+/* get tx timestamp status */
+static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
+{
+ return p->tdes23.tx_rd_des23.timestmp_enable;
+}
+
+/* TX Context Descriptor Specific */
+static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->ctxt_bit = 1;
+}
+
+/* Set the owner of TX context descriptor */
+static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+/* Get the owner of TX context descriptor */
+static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->own_bit;
+}
+
+/* Set TX mss in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
+{
+ p->maxseg_size = mss;
+}
+
+/* Get TX mss from TX context Descriptor */
+static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->maxseg_size;
+}
+
+/* Set TX tcmssv in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->tcmssv = 1;
+}
+
+/* Reset TX ostc in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->ostc = 0;
+}
+
+/* Set IVLAN information */
+static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
+ int is_ivlanvalid, int ivlan_tag,
+ int ivlan_ctl)
+{
+ if (is_ivlanvalid) {
+ p->ivlan_tag_valid = is_ivlanvalid;
+ p->ivlan_tag = ivlan_tag;
+ p->ivlan_tag_ctl = ivlan_ctl;
+ }
+}
+
+/* Return IVLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->ivlan_tag;
+}
+
+/* Set VLAN Tag */
+static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
+ int is_vlanvalid, int vlan_tag)
+{
+ if (is_vlanvalid) {
+ p->vltag_valid = is_vlanvalid;
+ p->vlan_tag = vlan_tag;
+ }
+}
+
+/* Return VLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->vlan_tag;
+}
+
+/* Set Time stamp */
+static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
+ u8 ostc_enable, u64 tstamp)
+{
+ if (ostc_enable) {
+ p->ostc = ostc_enable;
+ p->tstamp_lo = (u32) tstamp;
+ p->tstamp_hi = (u32) (tstamp>>32);
+ }
+}
+
+/* Close TX context descriptor */
+static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+/* WB status of context descriptor */
+static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
+{
+ return p->ctxt_desc_err;
+}
+
+/* DMA RX descriptor ring initialization */
+static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ p->rdes23.rx_rd_des23.own_bit = 1;
+ if (disable_rx_ic)
+ p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
+}
+
+/* Get RX own bit */
+static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_rd_des23.own_bit;
+}
+
+/* Set RX own bit */
+static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+ p->rdes23.rx_rd_des23.own_bit = 1;
+}
+
+/* Get the receive frame size */
+static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.pkt_len;
+}
+
+/* Return first Descriptor status */
+static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.first_desc;
+}
+
+/* Return Last Descriptor status */
+static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
+{
+ return p->rdes23.rx_wb_des23.last_desc;
+}
+
+/* Return the RX status looking at the WB fields */
+static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
+ struct sxgbe_extra_stats *x, int *checksum)
+{
+ int status = 0;
+
+ *checksum = CHECKSUM_UNNECESSARY;
+ if (p->rdes23.rx_wb_des23.err_summary) {
+ switch (p->rdes23.rx_wb_des23.err_l2_type) {
+ case RX_GMII_ERR:
+ status = -EINVAL;
+ x->rx_code_gmii_err++;
+ break;
+ case RX_WATCHDOG_ERR:
+ status = -EINVAL;
+ x->rx_watchdog_err++;
+ break;
+ case RX_CRC_ERR:
+ status = -EINVAL;
+ x->rx_crc_err++;
+ break;
+ case RX_GAINT_ERR:
+ status = -EINVAL;
+ x->rx_gaint_pkt_err++;
+ break;
+ case RX_IP_HDR_ERR:
+ *checksum = CHECKSUM_NONE;
+ x->ip_hdr_err++;
+ break;
+ case RX_PAYLOAD_ERR:
+ *checksum = CHECKSUM_NONE;
+ x->ip_payload_err++;
+ break;
+ case RX_OVERFLOW_ERR:
+ status = -EINVAL;
+ x->overflow_error++;
+ break;
+ default:
+ pr_err("Invalid Error type\n");
+ break;
+ }
+ } else {
+ switch (p->rdes23.rx_wb_des23.err_l2_type) {
+ case RX_LEN_PKT:
+ x->len_pkt++;
+ break;
+ case RX_MACCTL_PKT:
+ x->mac_ctl_pkt++;
+ break;
+ case RX_DCBCTL_PKT:
+ x->dcb_ctl_pkt++;
+ break;
+ case RX_ARP_PKT:
+ x->arp_pkt++;
+ break;
+ case RX_OAM_PKT:
+ x->oam_pkt++;
+ break;
+ case RX_UNTAG_PKT:
+ x->untag_okt++;
+ break;
+ case RX_OTHER_PKT:
+ x->other_pkt++;
+ break;
+ case RX_SVLAN_PKT:
+ x->svlan_tag_pkt++;
+ break;
+ case RX_CVLAN_PKT:
+ x->cvlan_tag_pkt++;
+ break;
+ case RX_DVLAN_OCVLAN_ICVLAN_PKT:
+ x->dvlan_ocvlan_icvlan_pkt++;
+ break;
+ case RX_DVLAN_OSVLAN_ISVLAN_PKT:
+ x->dvlan_osvlan_isvlan_pkt++;
+ break;
+ case RX_DVLAN_OSVLAN_ICVLAN_PKT:
+ x->dvlan_osvlan_icvlan_pkt++;
+ break;
+ case RX_DVLAN_OCVLAN_ISVLAN_PKT:
+ x->dvlan_ocvlan_icvlan_pkt++;
+ break;
+ default:
+ pr_err("Invalid L2 Packet type\n");
+ break;
+ }
+ }
+
+ /* L3/L4 Pkt type */
+ switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
+ case RX_NOT_IP_PKT:
+ x->not_ip_pkt++;
+ break;
+ case RX_IPV4_TCP_PKT:
+ x->ip4_tcp_pkt++;
+ break;
+ case RX_IPV4_UDP_PKT:
+ x->ip4_udp_pkt++;
+ break;
+ case RX_IPV4_ICMP_PKT:
+ x->ip4_icmp_pkt++;
+ break;
+ case RX_IPV4_UNKNOWN_PKT:
+ x->ip4_unknown_pkt++;
+ break;
+ case RX_IPV6_TCP_PKT:
+ x->ip6_tcp_pkt++;
+ break;
+ case RX_IPV6_UDP_PKT:
+ x->ip6_udp_pkt++;
+ break;
+ case RX_IPV6_ICMP_PKT:
+ x->ip6_icmp_pkt++;
+ break;
+ case RX_IPV6_UNKNOWN_PKT:
+ x->ip6_unknown_pkt++;
+ break;
+ default:
+ pr_err("Invalid L3/L4 Packet type\n");
+ break;
+ }
+
+ /* Filter */
+ if (p->rdes23.rx_wb_des23.vlan_filter_match)
+ x->vlan_filter_match++;
+
+ if (p->rdes23.rx_wb_des23.sa_filter_fail) {
+ status = -EINVAL;
+ x->sa_filter_fail++;
+ }
+ if (p->rdes23.rx_wb_des23.da_filter_fail) {
+ status = -EINVAL;
+ x->da_filter_fail++;
+ }
+ if (p->rdes23.rx_wb_des23.hash_filter_pass)
+ x->hash_filter_pass++;
+
+ if (p->rdes23.rx_wb_des23.l3_filter_match)
+ x->l3_filter_match++;
+
+ if (p->rdes23.rx_wb_des23.l4_filter_match)
+ x->l4_filter_match++;
+
+ return status;
+}
+
+/* Get own bit of context descriptor */
+static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+ return p->own_bit;
+}
+
+/* Set own bit for context descriptor */
+static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+ p->own_bit = 1;
+}
+
+/* Return the reception status looking at Context control information */
+static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
+ struct sxgbe_extra_stats *x)
+{
+ if (p->tstamp_dropped)
+ x->timestamp_dropped++;
+
+ /* ptp */
+ if (p->ptp_msgtype == RX_NO_PTP)
+ x->rx_msg_type_no_ptp++;
+ else if (p->ptp_msgtype == RX_PTP_SYNC)
+ x->rx_ptp_type_sync++;
+ else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
+ x->rx_ptp_type_follow_up++;
+ else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
+ x->rx_ptp_type_delay_req++;
+ else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
+ x->rx_ptp_type_delay_resp++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
+ x->rx_ptp_type_pdelay_req++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
+ x->rx_ptp_type_pdelay_resp++;
+ else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
+ x->rx_ptp_type_pdelay_follow_up++;
+ else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
+ x->rx_ptp_announce++;
+ else if (p->ptp_msgtype == RX_PTP_MGMT)
+ x->rx_ptp_mgmt++;
+ else if (p->ptp_msgtype == RX_PTP_SIGNAL)
+ x->rx_ptp_signal++;
+ else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
+ x->rx_ptp_resv_msg_type++;
+}
+
+/* Get rx timestamp status */
+static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
+{
+ if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
+ pr_err("Time stamp corrupted\n");
+ return 0;
+ }
+
+ return p->tstamp_available;
+}
+
+static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
+{
+ u64 ns;
+
+ ns = p->tstamp_lo;
+ ns |= ((u64)p->tstamp_hi) << 32;
+
+ return ns;
+}
+
+static const struct sxgbe_desc_ops desc_ops = {
+ .init_tx_desc = sxgbe_init_tx_desc,
+ .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse,
+ .prepare_tx_desc = sxgbe_prepare_tx_desc,
+ .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc,
+ .set_tx_owner = sxgbe_set_tx_owner,
+ .get_tx_owner = sxgbe_get_tx_owner,
+ .close_tx_desc = sxgbe_close_tx_desc,
+ .release_tx_desc = sxgbe_release_tx_desc,
+ .clear_tx_ic = sxgbe_clear_tx_ic,
+ .get_tx_ls = sxgbe_get_tx_ls,
+ .get_tx_len = sxgbe_get_tx_len,
+ .tx_enable_tstamp = sxgbe_tx_enable_tstamp,
+ .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status,
+ .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt,
+ .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner,
+ .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner,
+ .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss,
+ .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss,
+ .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv,
+ .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc,
+ .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag,
+ .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag,
+ .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag,
+ .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag,
+ .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp,
+ .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close,
+ .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde,
+ .init_rx_desc = sxgbe_init_rx_desc,
+ .get_rx_owner = sxgbe_get_rx_owner,
+ .set_rx_owner = sxgbe_set_rx_owner,
+ .get_rx_frame_len = sxgbe_get_rx_frame_len,
+ .get_rx_fd_status = sxgbe_get_rx_fd_status,
+ .get_rx_ld_status = sxgbe_get_rx_ld_status,
+ .rx_wbstatus = sxgbe_rx_wbstatus,
+ .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner,
+ .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner,
+ .rx_ctxt_wbstatus = sxgbe_rx_ctxt_wbstatus,
+ .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status,
+ .get_timestamp = sxgbe_get_rx_timestamp,
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
+{
+ return &desc_ops;
+}
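The TX half of the ops table above implies roughly the following per-packet descriptor lifecycle; this is only a sketch (DMA mapping, ring indexing and locking are handled in sxgbe_main.c, and tx_desc/csum_enabled below are hypothetical placeholders):

	const struct sxgbe_desc_ops *desc = sxgbe_get_desc_ops();
	struct sxgbe_tx_norm_desc *p = tx_desc;	/* hypothetical ring slot */

	desc->init_tx_desc(p);
	desc->prepare_tx_desc(p, 1, skb->len, skb->len, csum_enabled);
	desc->close_tx_desc(p);	/* last segment, interrupt on completion */
	desc->set_tx_owner(p);	/* hand the descriptor over to the DMA */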
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644
index 000000000000..838cb9fb0ea9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -0,0 +1,298 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DESC_H__
+#define __SXGBE_DESC_H__
+
+#define SXGBE_DESC_SIZE_BYTES 16
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ /* IP header but pseudoheader is not calculated */
+ cic_no_pseudoheader = 2,
+ cic_full = 3, /* IP header and pseudoheader */
+};
+
+struct sxgbe_tx_norm_desc {
+ u64 tdes01; /* buf1 address */
+ union {
+ /* TX Read-Format Desc 2,3 */
+ struct {
+ /* TDES2 */
+ u32 buf1_size:14;
+ u32 vlan_tag_ctl:2;
+ u32 buf2_size:14;
+ u32 timestmp_enable:1;
+ u32 int_on_com:1;
+ /* TDES3 */
+ union {
+ u32 tcp_payload_len:18;
+ struct {
+ u32 total_pkt_len:15;
+ u32 reserved1:1;
+ u32 cksum_ctl:2;
+ } cksum_pktlen;
+ } tx_pkt_len;
+
+ u32 tse_bit:1;
+ u32 tcp_hdr_len:4;
+ u32 sa_insert_ctl:3;
+ u32 crc_pad_ctl:2;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 ctxt_bit:1;
+ u32 own_bit:1;
+ } tx_rd_des23;
+
+ /* tx write back Desc 2,3 */
+ struct {
+ /* WB TES2 */
+ u32 reserved1;
+ /* WB TES3 */
+ u32 reserved2:31;
+ u32 own_bit:1;
+ } tx_wb_des23;
+ } tdes23;
+};
+
+struct sxgbe_rx_norm_desc {
+ union {
+ u32 rdes0; /* buf1 address */
+ struct {
+ u32 out_vlan_tag:16;
+ u32 in_vlan_tag:16;
+ } wb_rx_des0;
+ } rd_wb_des0;
+
+ union {
+ u32 rdes1; /* buf2 address or buf1[63:32] */
+ u32 rss_hash; /* Write-back RX */
+ } rd_wb_des1;
+
+ union {
+ /* RX Read format Desc 2,3 */
+ struct{
+ /* RDES2 */
+ u32 buf2_addr;
+ /* RDES3 */
+ u32 buf2_hi_addr:30;
+ u32 int_on_com:1;
+ u32 own_bit:1;
+ } rx_rd_des23;
+
+ /* RX write back */
+ struct{
+ /* WB RDES2 */
+ u32 hdr_len:10;
+ u32 rdes2_reserved:2;
+ u32 elrd_val:1;
+ u32 iovt_sel:1;
+ u32 res_pkt:1;
+ u32 vlan_filter_match:1;
+ u32 sa_filter_fail:1;
+ u32 da_filter_fail:1;
+ u32 hash_filter_pass:1;
+ u32 macaddr_filter_match:8;
+ u32 l3_filter_match:1;
+ u32 l4_filter_match:1;
+ u32 l34_filter_num:3;
+
+ /* WB RDES3 */
+ u32 pkt_len:14;
+ u32 rdes3_reserved:1;
+ u32 err_summary:1;
+ u32 err_l2_type:4;
+ u32 layer34_pkt_type:4;
+ u32 no_coagulation_pkt:1;
+ u32 in_seq_pkt:1;
+ u32 rss_valid:1;
+ u32 context_des_avail:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 recv_context_desc:1;
+ u32 own_bit:1;
+ } rx_wb_des23;
+ } rdes23;
+};
+
+/* Context descriptor structure */
+struct sxgbe_tx_ctxt_desc {
+ u32 tstamp_lo;
+ u32 tstamp_hi;
+ u32 maxseg_size:15;
+ u32 reserved1:1;
+ u32 ivlan_tag:16;
+ u32 vlan_tag:16;
+ u32 vltag_valid:1;
+ u32 ivlan_tag_valid:1;
+ u32 ivlan_tag_ctl:2;
+ u32 reserved2:3;
+ u32 ctxt_desc_err:1;
+ u32 reserved3:2;
+ u32 ostc:1;
+ u32 tcmssv:1;
+ u32 reserved4:2;
+ u32 ctxt_bit:1;
+ u32 own_bit:1;
+};
+
+struct sxgbe_rx_ctxt_desc {
+ u32 tstamp_lo;
+ u32 tstamp_hi;
+ u32 reserved1;
+ u32 ptp_msgtype:4;
+ u32 tstamp_available:1;
+ u32 ptp_rsp_err:1;
+ u32 tstamp_dropped:1;
+ u32 reserved2:23;
+ u32 rx_ctxt_desc:1;
+ u32 own_bit:1;
+};
+
+struct sxgbe_desc_ops {
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+ u32 total_hdr_len, u32 tcp_hdr_len,
+ u32 tcp_payload_len);
+
+ /* Assign buffer lengths for descriptor */
+ void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+ int buf1_len, int pkt_len, int cksum);
+
+ /* Set VLAN control information */
+ void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
+
+ /* Set the owner of the descriptor */
+ void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+ /* Get the owner of the descriptor */
+ int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted
+ */
+ void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
+
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
+
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
+
+ /* Set tx timestamp enable bit */
+ void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
+
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
+
+ /* TX Context Descriptor Specific */
+ void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set the owner of the TX context descriptor */
+ void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Get the owner of the TX context descriptor */
+ int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set TX mss */
+ void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
+
+ /* Get TX mss */
+ int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set TX tcmssv */
+ void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Reset TX ostc */
+ void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set IVLAN information */
+ void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
+ int is_ivlanvalid, int ivlan_tag,
+ int ivlan_ctl);
+
+ /* Return IVLAN Tag */
+ int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set VLAN Tag */
+ void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
+ int is_vlanvalid, int vlan_tag);
+
+ /* Return VLAN Tag */
+ int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* Set Time stamp */
+ void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
+ u8 ostc_enable, u64 tstamp);
+
+ /* Close TX context descriptor */
+ void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* WB status of context descriptor */
+ int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
+
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+ int mode, int end);
+
+ /* Get own bit */
+ int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+ /* Set own bit */
+ void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+ /* Get the receive frame size */
+ int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return first Descriptor status */
+ int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return Last Descriptor status */
+ int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
+
+ /* Return the reception status looking at the write-back fields */
+ int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
+ struct sxgbe_extra_stats *x, int *checksum);
+
+ /* Get own bit */
+ int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Set own bit */
+ void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Return the reception status looking at Context control information */
+ void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
+ struct sxgbe_extra_stats *x);
+
+ /* Get rx timestamp status */
+ int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
+
+ /* Get timestamp value for rx, need to check this */
+ u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
+
+#endif /* __SXGBE_DESC_H__ */
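The RX half of the same table suggests a poll loop along these lines; again only a hedged sketch, with rx_desc standing in for the current ring slot and the skb handling elided:

	const struct sxgbe_desc_ops *desc = sxgbe_get_desc_ops();
	struct sxgbe_rx_norm_desc *p = rx_desc;	/* hypothetical ring slot */
	int checksum, frame_len, status;

	if (!desc->get_rx_owner(p)) {	/* written back by the DMA */
		status = desc->rx_wbstatus(p, &priv->xstats, &checksum);
		frame_len = desc->get_rx_frame_len(p);
		/* ... build and hand the skb up, then recycle ... */
		desc->set_rx_owner(p);
	}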
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..28f89c41d0cd
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,382 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_desc.h"
+
+/* DMA core initialization */
+static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
+{
+ int retry_count = 10;
+ u32 reg_val;
+
+ /* reset the DMA */
+ writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
+ while (retry_count--) {
+ if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
+ SXGBE_DMA_SOFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (retry_count < 0)
+ return -EBUSY;
+
+ reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+ /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
+ * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
+ * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
+ * Set burst_map irrespective of fix_burst value.
+ */
+ if (!fix_burst)
+ reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
+
+ /* write burst len map */
+ reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
+
+ writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+ return 0;
+}
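The burst_map argument is treated as a bitmap that lands in bits [7:1] of the system bus mode register (SXGBE_DMA_BLENMAP_LSHIFT is 1, see sxgbe_dma.h). Assuming the bits follow the order listed in the comment (bit 0 = BLEN4 ... bit 6 = BLEN256), a value of 0x07 would enable BLEN4/8/16; a small host-side check of the shift:

#include <stdio.h>

#define SXGBE_DMA_BLENMAP_LSHIFT	1

int main(void)
{
	/* assumed mapping: bit0=BLEN4, bit1=BLEN8, ... bit6=BLEN256 */
	unsigned int burst_map = 0x07;	/* BLEN4 | BLEN8 | BLEN16 */
	unsigned int reg_val = 0;

	reg_val |= burst_map << SXGBE_DMA_BLENMAP_LSHIFT;
	printf("sysbus mode bits: 0x%08x\n", reg_val);	/* 0x0000000e */
	return 0;
}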
+
+static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
+ int fix_burst, int pbl, dma_addr_t dma_tx,
+ dma_addr_t dma_rx, int t_rsize, int r_rsize)
+{
+ u32 reg_val;
+ dma_addr_t dma_addr;
+
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+ /* set the pbl */
+ if (fix_burst) {
+ reg_val |= SXGBE_DMA_PBL_X8MODE;
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+ /* program the TX pbl */
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ /* program the RX pbl */
+ reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+ reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+ }
+
+ /* program desc registers */
+ writel(upper_32_bits(dma_tx),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
+ writel(lower_32_bits(dma_tx),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
+
+ writel(upper_32_bits(dma_rx),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
+ writel(lower_32_bits(dma_rx),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+
+ /* program tail pointers */
+ /* assumption: the upper 32 bits are constant and
+ * the same as the TX/RX descriptor list base
+ */
+ dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+ writel(lower_32_bits(dma_addr),
+ ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
+
+ dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+ writel(lower_32_bits(dma_addr),
+ ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+ /* program the ring sizes */
+ writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
+ writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
+
+ /* Enable TX/RX interrupts */
+ writel(SXGBE_DMA_ENA_INT,
+ ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
+}
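The tail pointer written above is just the ring base plus the byte offset of the last descriptor; with the 16-byte SXGBE_DESC_SIZE_BYTES from sxgbe_desc.h and, say, a 512-entry ring, that is base + 0x1ff0. A trivial host-side check of the arithmetic (the base and ring size are made up):

#include <stdio.h>
#include <stdint.h>

#define SXGBE_DESC_SIZE_BYTES	16

int main(void)
{
	uint64_t dma_tx = 0x80000000ULL;	/* hypothetical ring base */
	int t_rsize = 512;			/* hypothetical ring size */
	uint64_t tail = dma_tx + (uint64_t)(t_rsize - 1) * SXGBE_DESC_SIZE_BYTES;

	/* prints: tail pointer 0x80001ff0 */
	printf("tail pointer 0x%llx\n", (unsigned long long)tail);
	return 0;
}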
+
+static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
+{
+ u32 tx_config;
+
+ tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+ tx_config |= SXGBE_TX_START_DMA;
+ writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+ /* Enable TX/RX interrupts */
+ writel(SXGBE_DMA_ENA_INT,
+ ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+ /* Disable TX/RX interrupts */
+ writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
+{
+ int cnum;
+ u32 tx_ctl_reg;
+
+ for (cnum = 0; cnum < tchannels; cnum++) {
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ tx_ctl_reg |= SXGBE_TX_ENABLE;
+ writel(tx_ctl_reg,
+ ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+ u32 tx_ctl_reg;
+
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+ tx_ctl_reg |= SXGBE_TX_ENABLE;
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+ u32 tx_ctl_reg;
+
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+ tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
+{
+ int cnum;
+ u32 tx_ctl_reg;
+
+ for (cnum = 0; cnum < tchannels; cnum++) {
+ tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+ writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
+{
+ int cnum;
+ u32 rx_ctl_reg;
+
+ for (cnum = 0; cnum < rchannels; cnum++) {
+ rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ rx_ctl_reg |= SXGBE_RX_ENABLE;
+ writel(rx_ctl_reg,
+ ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ }
+}
+
+static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
+{
+ int cnum;
+ u32 rx_ctl_reg;
+
+ for (cnum = 0; cnum < rchannels; cnum++) {
+ rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
+ writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+ }
+}
+
+static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x)
+{
+ u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+ u32 clear_val = 0;
+ u32 ret_val = 0;
+
+ /* TX Normal Interrupt Summary */
+ if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (int_status & SXGBE_DMA_INT_STATUS_TI) {
+ ret_val |= handle_tx;
+ x->tx_normal_irq_n++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TI;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
+ x->tx_underflow_irq++;
+ ret_val |= tx_bump_tc;
+ clear_val |= SXGBE_DMA_INT_STATUS_TBU;
+ }
+ } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+ /* TX Abnormal Interrupt Summary */
+ if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
+ ret_val |= tx_hard_error;
+ clear_val |= SXGBE_DMA_INT_STATUS_TPS;
+ x->tx_process_stopped_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+ ret_val |= tx_hard_error;
+ x->fatal_bus_error_irq++;
+
+ /* Assumption: the FBE bit is the combination of
+ * all the bus access errors and is cleared when
+ * the respective error bits are cleared
+ */
+
+ /* check for actual cause */
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
+ x->tx_read_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
+ } else {
+ x->tx_write_transfer_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
+ x->tx_desc_access_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
+ } else {
+ x->tx_buffer_access_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
+ x->tx_data_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
+ }
+ }
+
+ /* context descriptor error */
+ if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
+ x->tx_ctxt_desc_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
+ }
+ }
+
+ /* clear the served bits */
+ writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+ return ret_val;
+}
+
+static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x)
+{
+ u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+ u32 clear_val = 0;
+ u32 ret_val = 0;
+
+ /* RX Normal Interrupt Summary */
+ if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (int_status & SXGBE_DMA_INT_STATUS_RI) {
+ ret_val |= handle_rx;
+ x->rx_normal_irq_n++;
+ clear_val |= SXGBE_DMA_INT_STATUS_RI;
+ }
+ } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+ /* RX Abnormal Interrupt Summary */
+ if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
+ ret_val |= rx_bump_tc;
+ clear_val |= SXGBE_DMA_INT_STATUS_RBU;
+ x->rx_underflow_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
+ ret_val |= rx_hard_error;
+ clear_val |= SXGBE_DMA_INT_STATUS_RPS;
+ x->rx_process_stopped_irq++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+ ret_val |= rx_hard_error;
+ x->fatal_bus_error_irq++;
+
+ /* Assumption: the FBE bit is the combination of
+ * all the bus access errors and is cleared when
+ * the respective error bits are cleared
+ */
+
+ /* check for actual cause */
+ if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
+ x->rx_read_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB0;
+ } else {
+ x->rx_write_transfer_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
+ x->rx_desc_access_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB1;
+ } else {
+ x->rx_buffer_access_err++;
+ }
+
+ if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
+ x->rx_data_transfer_err++;
+ clear_val |= SXGBE_DMA_INT_STATUS_REB2;
+ }
+ }
+ }
+
+ /* clear the served bits */
+ writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+ return ret_val;
+}
+
+/* Program the HW RX Watchdog */
+static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ u32 que_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
+ writel(riwt,
+ ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
+ }
+}
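A hedged sketch of how an rx-usecs style coalescing path might clamp and program this watchdog using the RIWT bounds from sxgbe_common.h; the actual ethtool plumbing and any microsecond-to-RIWT conversion are outside this hunk, and wanted_riwt is a hypothetical input:

	u32 riwt = wanted_riwt;	/* hypothetical value derived from rx-usecs */

	if (riwt > SXGBE_MAX_DMA_RIWT)
		riwt = SXGBE_MAX_DMA_RIWT;
	else if (riwt < SXGBE_MIN_DMA_RIWT)
		riwt = SXGBE_MIN_DMA_RIWT;

	priv->hw->dma->rx_watchdog(priv->ioaddr, riwt);
	priv->rx_riwt = riwt;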
+
+static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
+{
+ u32 ctrl;
+
+ ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+ ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
+ writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+}
+
+static const struct sxgbe_dma_ops sxgbe_dma_ops = {
+ .init = sxgbe_dma_init,
+ .cha_init = sxgbe_dma_channel_init,
+ .enable_dma_transmission = sxgbe_enable_dma_transmission,
+ .enable_dma_irq = sxgbe_enable_dma_irq,
+ .disable_dma_irq = sxgbe_disable_dma_irq,
+ .start_tx = sxgbe_dma_start_tx,
+ .start_tx_queue = sxgbe_dma_start_tx_queue,
+ .stop_tx = sxgbe_dma_stop_tx,
+ .stop_tx_queue = sxgbe_dma_stop_tx_queue,
+ .start_rx = sxgbe_dma_start_rx,
+ .stop_rx = sxgbe_dma_stop_rx,
+ .tx_dma_int_status = sxgbe_tx_dma_int_status,
+ .rx_dma_int_status = sxgbe_rx_dma_int_status,
+ .rx_watchdog = sxgbe_dma_rx_watchdog,
+ .enable_tso = sxgbe_enable_tso,
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
+{
+ return &sxgbe_dma_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644
index 000000000000..1607b54c9bb0
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
@@ -0,0 +1,50 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DMA_H__
+#define __SXGBE_DMA_H__
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+#define SXGBE_DMA_BLENMAP_LSHIFT 1
+#define SXGBE_DMA_TXPBL_LSHIFT 16
+#define SXGBE_DMA_RXPBL_LSHIFT 16
+#define DEFAULT_DMA_PBL 8
+
+struct sxgbe_dma_ops {
+ /* DMA core initialization */
+ int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
+ void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
+ int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
+ int t_rsize, int r_rsize);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
+ void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+ void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+ void (*start_tx)(void __iomem *ioaddr, int tchannels);
+ void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+ void (*stop_tx)(void __iomem *ioaddr, int tchannels);
+ void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+ void (*start_rx)(void __iomem *ioaddr, int rchannels);
+ void (*stop_rx)(void __iomem *ioaddr, int rchannels);
+ int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x);
+ int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+ struct sxgbe_extra_stats *x);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
+ /* Enable TSO for each DMA channel */
+ void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
+
+#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..0415fa50eeb7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,524 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_dma.h"
+
+struct sxgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define SXGBE_STAT(m) \
+{ \
+ #m, \
+ FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
+ offsetof(struct sxgbe_priv_data, xstats.m) \
+}
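For one concrete expansion of the macro above, SXGBE_STAT(rx_crc_err) resolves to an initializer equivalent to the following (FIELD_SIZEOF and offsetof are evaluated at compile time):

	{
		"rx_crc_err",
		FIELD_SIZEOF(struct sxgbe_extra_stats, rx_crc_err),
		offsetof(struct sxgbe_priv_data, xstats.rx_crc_err)
	},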
+
+static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
+ /* TX/RX IRQ events */
+ SXGBE_STAT(tx_process_stopped_irq),
+ SXGBE_STAT(tx_ctxt_desc_err),
+ SXGBE_STAT(tx_threshold),
+ SXGBE_STAT(rx_threshold),
+ SXGBE_STAT(tx_pkt_n),
+ SXGBE_STAT(rx_pkt_n),
+ SXGBE_STAT(normal_irq_n),
+ SXGBE_STAT(tx_normal_irq_n),
+ SXGBE_STAT(rx_normal_irq_n),
+ SXGBE_STAT(napi_poll),
+ SXGBE_STAT(tx_clean),
+ SXGBE_STAT(tx_reset_ic_bit),
+ SXGBE_STAT(rx_process_stopped_irq),
+ SXGBE_STAT(rx_underflow_irq),
+
+ /* Bus access errors */
+ SXGBE_STAT(fatal_bus_error_irq),
+ SXGBE_STAT(tx_read_transfer_err),
+ SXGBE_STAT(tx_write_transfer_err),
+ SXGBE_STAT(tx_desc_access_err),
+ SXGBE_STAT(tx_buffer_access_err),
+ SXGBE_STAT(tx_data_transfer_err),
+ SXGBE_STAT(rx_read_transfer_err),
+ SXGBE_STAT(rx_write_transfer_err),
+ SXGBE_STAT(rx_desc_access_err),
+ SXGBE_STAT(rx_buffer_access_err),
+ SXGBE_STAT(rx_data_transfer_err),
+
+ /* EEE-LPI stats */
+ SXGBE_STAT(tx_lpi_entry_n),
+ SXGBE_STAT(tx_lpi_exit_n),
+ SXGBE_STAT(rx_lpi_entry_n),
+ SXGBE_STAT(rx_lpi_exit_n),
+ SXGBE_STAT(eee_wakeup_error_n),
+
+ /* RX specific */
+ /* L2 error */
+ SXGBE_STAT(rx_code_gmii_err),
+ SXGBE_STAT(rx_watchdog_err),
+ SXGBE_STAT(rx_crc_err),
+ SXGBE_STAT(rx_gaint_pkt_err),
+ SXGBE_STAT(ip_hdr_err),
+ SXGBE_STAT(ip_payload_err),
+ SXGBE_STAT(overflow_error),
+
+ /* L2 Pkt type */
+ SXGBE_STAT(len_pkt),
+ SXGBE_STAT(mac_ctl_pkt),
+ SXGBE_STAT(dcb_ctl_pkt),
+ SXGBE_STAT(arp_pkt),
+ SXGBE_STAT(oam_pkt),
+ SXGBE_STAT(untag_okt),
+ SXGBE_STAT(other_pkt),
+ SXGBE_STAT(svlan_tag_pkt),
+ SXGBE_STAT(cvlan_tag_pkt),
+ SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
+ SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
+ SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
+ SXGBE_STAT(dvan_ocvlan_icvlan_pkt),
+
+ /* L3/L4 Pkt type */
+ SXGBE_STAT(not_ip_pkt),
+ SXGBE_STAT(ip4_tcp_pkt),
+ SXGBE_STAT(ip4_udp_pkt),
+ SXGBE_STAT(ip4_icmp_pkt),
+ SXGBE_STAT(ip4_unknown_pkt),
+ SXGBE_STAT(ip6_tcp_pkt),
+ SXGBE_STAT(ip6_udp_pkt),
+ SXGBE_STAT(ip6_icmp_pkt),
+ SXGBE_STAT(ip6_unknown_pkt),
+
+ /* Filter specific */
+ SXGBE_STAT(vlan_filter_match),
+ SXGBE_STAT(sa_filter_fail),
+ SXGBE_STAT(da_filter_fail),
+ SXGBE_STAT(hash_filter_pass),
+ SXGBE_STAT(l3_filter_match),
+ SXGBE_STAT(l4_filter_match),
+
+ /* RX context specific */
+ SXGBE_STAT(timestamp_dropped),
+ SXGBE_STAT(rx_msg_type_no_ptp),
+ SXGBE_STAT(rx_ptp_type_sync),
+ SXGBE_STAT(rx_ptp_type_follow_up),
+ SXGBE_STAT(rx_ptp_type_delay_req),
+ SXGBE_STAT(rx_ptp_type_delay_resp),
+ SXGBE_STAT(rx_ptp_type_pdelay_req),
+ SXGBE_STAT(rx_ptp_type_pdelay_resp),
+ SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
+ SXGBE_STAT(rx_ptp_announce),
+ SXGBE_STAT(rx_ptp_mgmt),
+ SXGBE_STAT(rx_ptp_signal),
+ SXGBE_STAT(rx_ptp_resv_msg_type),
+};
+#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
+
+static int sxgbe_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int sxgbe_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled) {
+ sxgbe_disable_eee_mode(priv);
+ } else {
+		/* EEE is being enabled, but it is safer to verify the whole
+		 * configuration by invoking the eee_init function; it
+		 * returns false on failure.
+ */
+ priv->eee_enabled = sxgbe_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
+static void sxgbe_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static int sxgbe_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static u32 sxgbe_getmsglevel(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < SXGBE_STATS_LEN; i++) {
+ memcpy(p, sxgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+ int len;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = SXGBE_STATS_LEN;
+ return len;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void sxgbe_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int i;
+ char *p;
+
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+
+ if (val)
+ priv->xstats.eee_wakeup_error_n = val;
+ }
+
+ for (i = 0; i < SXGBE_STATS_LEN; i++) {
+ p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
+ data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
+ ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+static void sxgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ channel->max_rx = SXGBE_MAX_RX_CHANNELS;
+ channel->max_tx = SXGBE_MAX_TX_CHANNELS;
+ channel->rx_count = SXGBE_RX_QUEUES;
+ channel->tx_count = SXGBE_TX_QUEUES;
+}
+
+static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+ if (!clk)
+ return 0;
+
+ return (riwt * 256) / (clk / 1000000);
+}
+
+static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
+{
+ unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+ if (!clk)
+ return 0;
+
+ return (usec * (clk / 1000000)) / 256;
+}
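+
+/* Illustrative example: with a 250 MHz sxgbe_clk, one RIWT unit corresponds
+ * to 256 / 250 ~= 1.02 us, so a requested 100 us RX coalesce interval maps
+ * to a RIWT value of 97, and converting 97 back yields 99 us.
+ */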
+
+static int sxgbe_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->use_riwt)
+ ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);
+
+ return 0;
+}
+
+static int sxgbe_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ unsigned int rx_riwt;
+
+ if (!ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
+ return -EINVAL;
+ else if (!priv->use_riwt)
+ return -EOPNOTSUPP;
+
+ priv->rx_riwt = rx_riwt;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+ return 0;
+}
+
+static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on sxgbe */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
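+		/* fall through: L4 flows are also hashed on the IP addresses */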
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
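+		/* fall through: L4 flows are also hashed on the IP addresses */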
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ ret = sxgbe_get_rss_hash_opts(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ u32 reg_val = 0;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ !(cmd->data & RXH_L4_B_0_1) ||
+ !(cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ if (!(cmd->data & RXH_IP_SRC) ||
+ !(cmd->data & RXH_IP_DST) ||
+ (cmd->data & RXH_L4_B_0_1) ||
+ (cmd->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Read SXGBE RSS control register and update */
+ reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+ writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+ readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+
+ return 0;
+}
+
+static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = sxgbe_set_rss_hash_opt(priv, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void sxgbe_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = readl(ioaddr + reg_offset);
+ reg_ix++;
+ }
+
+ BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
+}
+
+static int sxgbe_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static const struct ethtool_ops sxgbe_ethtool_ops = {
+ .get_drvinfo = sxgbe_getdrvinfo,
+ .get_settings = sxgbe_getsettings,
+ .set_settings = sxgbe_setsettings,
+ .get_msglevel = sxgbe_getmsglevel,
+ .set_msglevel = sxgbe_setmsglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sxgbe_get_strings,
+ .get_ethtool_stats = sxgbe_get_ethtool_stats,
+ .get_sset_count = sxgbe_get_sset_count,
+ .get_channels = sxgbe_get_channels,
+ .get_coalesce = sxgbe_get_coalesce,
+ .set_coalesce = sxgbe_set_coalesce,
+ .get_rxnfc = sxgbe_get_rxnfc,
+ .set_rxnfc = sxgbe_set_rxnfc,
+ .get_regs = sxgbe_get_regs,
+ .get_regs_len = sxgbe_get_regs_len,
+ .get_eee = sxgbe_get_eee,
+ .set_eee = sxgbe_set_eee,
+};
+
+void sxgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644
index 000000000000..a72688e8dc6c
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -0,0 +1,2317 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_desc.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000
+#define DMA_TX_SIZE 512
+#define DMA_RX_SIZE 1024
+#define TC_DEFAULT 64
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+/* The default timer value, as per the sxgbe specification, is 1 sec (1000 ms) */
+#define SXGBE_DEFAULT_LPI_TIMER 1000
+
+static int debug = -1;
+static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
+
+#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
+/**
+ * sxgbe_verify_args - verify the driver parameters.
+ * Description: it verifies whether any wrong parameter was passed to the
+ * driver. Note that wrong parameters are replaced with the default values.
+ */
+static void sxgbe_verify_args(void)
+{
+ if (unlikely(eee_timer < 0))
+ eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+}
+
+static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
+{
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
+{
+	/* Exit and disable EEE if we are in the LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * sxgbe_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ * If there is no data transfer and we are not already in the LPI state,
+ * then the MAC transmitter can be moved to the LPI state.
+ */
+static void sxgbe_eee_ctrl_timer(unsigned long arg)
+{
+ struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
+
+ sxgbe_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+}
+
+/**
+ * sxgbe_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If EEE support was enabled while configuring the driver, the MAC
+ * actually supports EEE (from the HW capability register) and the PHY
+ * can also manage EEE, then enable the LPI state and start the timer
+ * to verify whether the TX path can enter the LPI state.
+ */
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->hw_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ return false;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ SXGBE_DEFAULT_LPI_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
+{
+	/* When EEE has already been initialised, we have to modify the
+	 * PLS bit in the LPI ctrl & status register according to the
+	 * PHY link status.
+	 */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
+/**
+ * sxgbe_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ */
+static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
+{
+ u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
+
+	/* assign the proper divider; this will be used during
+	 * MDIO communication
+	 */
+ if (clk_rate < SXGBE_CSR_F_150M)
+ priv->clk_csr = SXGBE_CSR_100_150M;
+ else if (clk_rate <= SXGBE_CSR_F_250M)
+ priv->clk_csr = SXGBE_CSR_150_250M;
+ else if (clk_rate <= SXGBE_CSR_F_300M)
+ priv->clk_csr = SXGBE_CSR_250_300M;
+ else if (clk_rate <= SXGBE_CSR_F_350M)
+ priv->clk_csr = SXGBE_CSR_300_350M;
+ else if (clk_rate <= SXGBE_CSR_F_400M)
+ priv->clk_csr = SXGBE_CSR_350_400M;
+ else if (clk_rate <= SXGBE_CSR_F_500M)
+ priv->clk_csr = SXGBE_CSR_400_500M;
+}
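+
+/* Example (illustrative, assuming the SXGBE_CSR_F_* constants denote the
+ * frequencies in their names): a 300 MHz CSR clock falls into the
+ * 250-300 MHz range and selects the SXGBE_CSR_250_300M divider.
+ */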
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
+
+static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
+{
+ return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
+}
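+
+/* Illustrative example: with tx_qsize = 512, cur_tx = 10 and dirty_tx = 4,
+ * 4 + 512 - 10 - 1 = 505 descriptors are still free; the queue is only
+ * woken once this count exceeds SXGBE_TX_THRESH (a quarter of the ring).
+ */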
+
+/**
+ * sxgbe_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void sxgbe_adjust_link(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u8 new_state = 0;
+ u8 speed = 0xff;
+
+ if (!phydev)
+ return;
+
+	/* SXGBE does not support auto-negotiation or half-duplex mode,
+	 * so duplex changes are not handled in this function; only the
+	 * speed and link status are handled.
+	 */
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case SPEED_10000:
+ speed = SXGBE_SPEED_10G;
+ break;
+ case SPEED_2500:
+ speed = SXGBE_SPEED_2_5G;
+ break;
+ case SPEED_1000:
+ speed = SXGBE_SPEED_1G;
+ break;
+ default:
+ netif_err(priv, link, dev,
+ "Speed (%d) not supported\n",
+ phydev->speed);
+ }
+
+ priv->speed = phydev->speed;
+ priv->hw->mac->set_speed(priv->ioaddr, speed);
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = SPEED_UNKNOWN;
+ }
+
+	if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ /* Alter the MAC settings for EEE */
+ sxgbe_eee_adjust(priv);
+}
+
+/**
+ * sxgbe_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int sxgbe_init_phy(struct net_device *ndev)
+{
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+ struct phy_device *phydev;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int phy_iface = priv->plat->interface;
+
+ /* assign default link status */
+ priv->oldlink = 0;
+ priv->speed = SPEED_UNKNOWN;
+ priv->oldduplex = DUPLEX_UNKNOWN;
+
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
+ priv->plat->bus_id);
+
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
+
+ phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* Stop Advertising 1000BASE Capability if interface is not GMII */
+ if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
+ (phy_iface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+
+ netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+ __func__, phydev->phy_id, phydev->link);
+
+ /* save phy device in private structure */
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/**
+ * sxgbe_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
+{
+ int i, j;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ /* Clear the Rx/Tx descriptors */
+ for (j = 0; j < SXGBE_RX_QUEUES; j++) {
+ for (i = 0; i < rxsize; i++)
+ priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ }
+
+ for (j = 0; j < SXGBE_TX_QUEUES; j++) {
+ for (i = 0; i < txsize; i++)
+ priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
+ }
+}
+
+static int sxgbe_init_rx_buffers(struct net_device *dev,
+ struct sxgbe_rx_norm_desc *p, int i,
+ unsigned int dma_buf_sz,
+ struct sxgbe_rx_queue *rx_ring)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ rx_ring->rx_skbuff[i] = skb;
+ rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ dma_buf_sz, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
+ netdev_err(dev, "%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
+
+ return 0;
+}
+/**
+ * init_tx_ring - init the TX descriptor ring
+ * @dev: device pointer
+ * @queue_no: queue number
+ * @tx_ring: ring to be initialised
+ * @tx_rsize: ring size
+ * Description: this function initializes the DMA TX descriptor ring
+ */
+static int init_tx_ring(struct device *dev, u8 queue_no,
+ struct sxgbe_tx_queue *tx_ring, int tx_rsize)
+{
+	/* TX ring was not allocated */
+ if (!tx_ring) {
+ dev_err(dev, "No memory for TX queue of SXGBE\n");
+ return -ENOMEM;
+ }
+
+ /* allocate memory for TX descriptors */
+ tx_ring->dma_tx = dma_zalloc_coherent(dev,
+ tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ &tx_ring->dma_tx_phy, GFP_KERNEL);
+ if (!tx_ring->dma_tx)
+ return -ENOMEM;
+
+ /* allocate memory for TX skbuff array */
+ tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!tx_ring->tx_skbuff_dma)
+ goto dmamem_err;
+
+ tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
+ sizeof(struct sk_buff *), GFP_KERNEL);
+
+ if (!tx_ring->tx_skbuff)
+ goto dmamem_err;
+
+ /* assign queue number */
+ tx_ring->queue_no = queue_no;
+
+	/* initialise counters */
+ tx_ring->dirty_tx = 0;
+ tx_ring->cur_tx = 0;
+
+	/* initialise TX queue lock */
+ spin_lock_init(&tx_ring->tx_lock);
+
+ return 0;
+
+dmamem_err:
+ dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ tx_ring->dma_tx, tx_ring->dma_tx_phy);
+ return -ENOMEM;
+}
+
+/**
+ * free_rx_ring - free the RX descriptor ring
+ * @dev: device pointer
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function frees the DMA RX descriptors and buffers
+ */
+void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
+{
+ dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+ kfree(rx_ring->rx_skbuff_dma);
+ kfree(rx_ring->rx_skbuff);
+}
+
+/**
+ * init_rx_ring - init the RX descriptor ring
+ * @dev: net device structure
+ * @queue_no: queue number
+ * @rx_ring: ring to be initialised
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor ring
+ */
+static int init_rx_ring(struct net_device *dev, u8 queue_no,
+ struct sxgbe_rx_queue *rx_ring, int rx_rsize)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int desc_index;
+ unsigned int bfsize = 0;
+ unsigned int ret = 0;
+
+ /* Set the max buffer size according to the MTU. */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
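+	/* e.g. (illustrative, with NET_IP_ALIGN == 2) a default 1500-byte MTU
+	 * gives a 1520-byte buffer: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2,
+	 * which is already a multiple of 8.
+	 */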
+
+ netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
+
+	/* RX ring was not allocated */
+ if (rx_ring == NULL) {
+ netdev_err(dev, "No memory for RX queue\n");
+ goto error;
+ }
+
+ /* assign queue number */
+ rx_ring->queue_no = queue_no;
+
+ /* allocate memory for RX descriptors */
+ rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ &rx_ring->dma_rx_phy, GFP_KERNEL);
+
+ if (rx_ring->dma_rx == NULL)
+ goto error;
+
+ /* allocate memory for RX skbuff array */
+ rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (rx_ring->rx_skbuff_dma == NULL)
+ goto dmamem_err;
+
+ rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
+ sizeof(struct sk_buff *), GFP_KERNEL);
+ if (rx_ring->rx_skbuff == NULL)
+ goto rxbuff_err;
+
+ /* initialise the buffers */
+ for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
+ struct sxgbe_rx_norm_desc *p;
+ p = rx_ring->dma_rx + desc_index;
+ ret = sxgbe_init_rx_buffers(dev, p, desc_index,
+ bfsize, rx_ring);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+	/* initialise counters */
+ rx_ring->cur_rx = 0;
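+	/* desc_index equals rx_rsize when the loop above completes, so
+	 * dirty_rx effectively starts at zero here.
+	 */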
+ rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
+ priv->dma_buf_sz = bfsize;
+
+ return 0;
+
+err_init_rx_buffers:
+ while (--desc_index >= 0)
+ free_rx_ring(priv->device, rx_ring, desc_index);
+ kfree(rx_ring->rx_skbuff);
+rxbuff_err:
+ kfree(rx_ring->rx_skbuff_dma);
+dmamem_err:
+ dma_free_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+error:
+ return -ENOMEM;
+}
+/**
+ * free_tx_ring - free the TX descriptor ring
+ * @dev: device pointer
+ * @tx_ring: ring to be freed
+ * @tx_rsize: ring size
+ * Description: this function frees the DMA TX descriptor ring
+ */
+void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
+{
+ dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+ tx_ring->dma_tx, tx_ring->dma_tx_phy);
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *netd)
+{
+ int queue_num, ret;
+ struct sxgbe_priv_data *priv = netdev_priv(netd);
+ int tx_rsize = priv->dma_tx_size;
+ int rx_rsize = priv->dma_rx_size;
+
+ /* Allocate memory for queue structures and TX descs */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ ret = init_tx_ring(priv->device, queue_num,
+ priv->txq[queue_num], tx_rsize);
+ if (ret) {
+ dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
+ goto txalloc_err;
+ }
+
+		/* save the private pointer in each ring; this
+		 * pointer is needed when cleaning the TX queue
+		 */
+ priv->txq[queue_num]->priv_ptr = priv;
+ }
+
+ /* Allocate memory for queue structures and RX descs */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ ret = init_rx_ring(netd, queue_num,
+ priv->rxq[queue_num], rx_rsize);
+ if (ret) {
+ netdev_err(netd, "RX DMA ring allocation failed!!\n");
+ goto rxalloc_err;
+ }
+
+		/* save the private pointer in each ring; this
+		 * pointer is needed when cleaning the RX queue
+		 */
+ priv->rxq[queue_num]->priv_ptr = priv;
+ }
+
+ sxgbe_clear_descriptors(priv);
+
+ return 0;
+
+txalloc_err:
+ while (queue_num--)
+ free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+ return ret;
+
+rxalloc_err:
+ while (queue_num--)
+ free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+ return ret;
+}
+
+static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
+{
+ int dma_desc;
+ struct sxgbe_priv_data *priv = txqueue->priv_ptr;
+ int tx_rsize = priv->dma_tx_size;
+
+ for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
+ struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
+
+ if (txqueue->tx_skbuff_dma[dma_desc])
+ dma_unmap_single(priv->device,
+ txqueue->tx_skbuff_dma[dma_desc],
+ priv->hw->desc->get_tx_len(tdesc),
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
+ txqueue->tx_skbuff[dma_desc] = NULL;
+ txqueue->tx_skbuff_dma[dma_desc] = 0;
+ }
+}
+
+
+static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+ tx_free_ring_skbufs(tqueue);
+ }
+}
+
+static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+ int tx_rsize = priv->dma_tx_size;
+ int rx_rsize = priv->dma_rx_size;
+
+ /* Release the DMA TX buffers */
+ dma_free_tx_skbufs(priv);
+
+ /* Release the TX ring memory also */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+ }
+
+ /* Release the RX ring memory also */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+ }
+}
+
+static int txring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->txq[queue_num] = devm_kmalloc(priv->device,
+ sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
+ if (!priv->txq[queue_num])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ priv->rxq[queue_num] = devm_kmalloc(priv->device,
+ sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
+ if (!priv->rxq[queue_num])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * sxgbe_mtl_operation_mode - HW MTL operation mode
+ * @priv: driver private structure
+ * Description: it sets the MTL operation mode: tx/rx MTL thresholds
+ * or Store-And-Forward capability.
+ */
+static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ /* TX/RX threshold control */
+ if (likely(priv->plat->force_sf_dma_mode)) {
+ /* set TC mode for TX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+ SXGBE_MTL_SFMODE);
+ priv->tx_tc = SXGBE_MTL_SFMODE;
+
+ /* set TC mode for RX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+ SXGBE_MTL_SFMODE);
+ priv->rx_tc = SXGBE_MTL_SFMODE;
+ } else if (unlikely(priv->plat->force_thresh_dma_mode)) {
+ /* set TC mode for TX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+ priv->tx_tc);
+ /* set TC mode for RX QUEUES */
+ SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+ priv->rx_tc);
+ } else {
+ pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
+ }
+}
+
+/**
+ * sxgbe_tx_queue_clean:
+ * @tqueue: TX queue to be cleaned
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
+{
+ struct sxgbe_priv_data *priv = tqueue->priv_ptr;
+ unsigned int tx_rsize = priv->dma_tx_size;
+ struct netdev_queue *dev_txq;
+ u8 queue_no = tqueue->queue_no;
+
+ dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
+
+ spin_lock(&tqueue->tx_lock);
+
+ priv->xstats.tx_clean++;
+ while (tqueue->dirty_tx != tqueue->cur_tx) {
+ unsigned int entry = tqueue->dirty_tx % tx_rsize;
+ struct sk_buff *skb = tqueue->tx_skbuff[entry];
+ struct sxgbe_tx_norm_desc *p;
+
+ p = tqueue->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->hw->desc->get_tx_owner(p))
+ break;
+
+ if (netif_msg_tx_done(priv))
+ pr_debug("%s: curr %d, dirty %d\n",
+ __func__, tqueue->cur_tx, tqueue->dirty_tx);
+
+ if (likely(tqueue->tx_skbuff_dma[entry])) {
+ dma_unmap_single(priv->device,
+ tqueue->tx_skbuff_dma[entry],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ tqueue->tx_skbuff_dma[entry] = 0;
+ }
+
+ if (likely(skb)) {
+ dev_kfree_skb(skb);
+ tqueue->tx_skbuff[entry] = NULL;
+ }
+
+ priv->hw->desc->release_tx_desc(p);
+
+ tqueue->dirty_tx++;
+ }
+
+ /* wake up queue */
+ if (unlikely(netif_tx_queue_stopped(dev_txq) &&
+ sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_tx_queue_stopped(dev_txq) &&
+ sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
+ if (netif_msg_tx_done(priv))
+ pr_debug("%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(dev_txq);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+
+ spin_unlock(&tqueue->tx_lock);
+}
+
+/**
+ * sxgbe_tx_all_clean:
+ * @priv: driver private structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+
+ sxgbe_tx_queue_clean(tqueue);
+ }
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ sxgbe_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+ }
+}
+
+/**
+ * sxgbe_restart_tx_queue: irq tx error mng function
+ * @priv: driver private structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
+{
+ struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
+ struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
+ queue_num);
+
+ /* stop the queue */
+ netif_tx_stop_queue(dev_txq);
+
+ /* stop the tx dma */
+ priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
+
+ /* free the skbuffs of the ring */
+ tx_free_ring_skbufs(tx_ring);
+
+	/* initialise counters */
+ tx_ring->cur_tx = 0;
+ tx_ring->dirty_tx = 0;
+
+ /* start the tx dma */
+ priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
+
+ priv->dev->stats.tx_errors++;
+
+ /* wakeup the queue */
+ netif_tx_wake_queue(dev_txq);
+}
+
+/**
+ * sxgbe_reset_all_tx_queues: irq tx error mng function
+ * @priv: driver private structure
+ * Description: it cleans all the descriptors and
+ * restarts the transmission on all queues in case of errors.
+ */
+static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+	/* On a TX timeout of the net device, resetting all queues
+	 * may not be the proper way; revisit this later if needed
+	 */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ sxgbe_restart_tx_queue(priv, queue_num);
+}
+
+/**
+ * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * the controller exposes capability registers that indicate the
+ * presence of optional features/functions; this function reads them
+ * and fills in the hw_cap structure used throughout the driver.
+ */
+static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
+{
+ int rval = 0;
+ struct sxgbe_hw_features *features = &priv->hw_cap;
+
+ /* Read First Capability Register CAP[0] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
+ if (rval) {
+ features->pmt_remote_wake_up =
+ SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
+ features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
+ features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
+ features->tx_csum_offload =
+ SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
+ features->rx_csum_offload =
+ SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
+ features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
+ features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
+ features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
+ features->eee = SXGBE_HW_FEAT_EEE(rval);
+ }
+
+	/* Read Second Capability Register CAP[1] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
+ if (rval) {
+ features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
+ features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+ features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+ features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
+ features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
+ features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
+ features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
+ features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
+ features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
+ features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
+ }
+
+	/* Read Third Capability Register CAP[2] */
+ rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
+ if (rval) {
+ features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
+ features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
+ features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
+ features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
+ features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
+ features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
+ }
+
+ return rval;
+}
+
+/**
+ * sxgbe_check_ether_addr: check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it verifies that the MAC address is valid; if it is not, the address is
+ * read from the hardware and, failing that, a random MAC address is generated
+ */
+static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
+{
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->ioaddr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ eth_hw_addr_random(priv->dev);
+ }
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
+}
+
+/**
+ * sxgbe_init_dma_engine: DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific SXGBE callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, default values are used instead.
+ */
+static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
+{
+ int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
+ int queue_num;
+
+ if (priv->plat->dma_cfg) {
+ pbl = priv->plat->dma_cfg->pbl;
+ fixed_burst = priv->plat->dma_cfg->fixed_burst;
+ burst_map = priv->plat->dma_cfg->burst_map;
+ }
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ priv->hw->dma->cha_init(priv->ioaddr, queue_num,
+ fixed_burst, pbl,
+ (priv->txq[queue_num])->dma_tx_phy,
+ (priv->rxq[queue_num])->dma_rx_phy,
+ priv->dma_tx_size, priv->dma_rx_size);
+
+ return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
+}
+
+/**
+ * sxgbe_init_mtl_engine: MTL init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the MTL invoking the specific SXGBE callback.
+ */
+static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
+ priv->hw_cap.tx_mtl_qsize);
+ priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
+ }
+}
+
+/**
+ * sxgbe_disable_mtl_engine: MTL disable.
+ * @priv: driver private structure
+ * Description:
+ * It disables the MTL queues by invoking the specific SXGBE callback.
+ */
+static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
+{
+ int queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+ priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
+}
+
+
+/**
+ * sxgbe_tx_timer: mitigation sw timer for tx.
+ * @data: data pointer
+ * Description:
+ * This is the timer handler used to directly invoke sxgbe_tx_queue_clean.
+ */
+static void sxgbe_tx_timer(unsigned long data)
+{
+ struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
+ sxgbe_tx_queue_clean(p);
+}
+
+/**
+ * sxgbe_tx_init_coalesce: init tx mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the transmit coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *p = priv->txq[queue_num];
+ p->tx_coal_frames = SXGBE_TX_FRAMES;
+ p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
+ init_timer(&p->txtimer);
+ p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
+ p->txtimer.data = (unsigned long)&priv->txq[queue_num];
+ p->txtimer.function = sxgbe_tx_timer;
+ add_timer(&p->txtimer);
+ }
+}
+
+static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
+{
+ u8 queue_num;
+
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ struct sxgbe_tx_queue *p = priv->txq[queue_num];
+ del_timer_sync(&p->txtimer);
+ }
+}
+
+/**
+ * sxgbe_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int sxgbe_open(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret, queue_num;
+
+ clk_prepare_enable(priv->sxgbe_clk);
+
+ sxgbe_check_ether_addr(priv);
+
+ /* Init the phy */
+ ret = sxgbe_init_phy(dev);
+ if (ret) {
+ netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto phy_error;
+ }
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
+ priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
+ priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
+ priv->tx_tc = TC_DEFAULT;
+ priv->rx_tc = TC_DEFAULT;
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ ret = sxgbe_init_dma_engine(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA initialization failed\n", __func__);
+ goto init_error;
+ }
+
+ /* MTL initialization */
+ sxgbe_init_mtl_engine(priv);
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ /* Request the IRQ lines */
+ ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, priv->irq, ret);
+ goto init_error;
+ }
+
+	/* If the LPI irq is different from the mac irq,
+	 * register a dedicated handler
+ */
+ if (priv->lpi_irq != dev->irq) {
+ ret = devm_request_irq(priv->device, priv->lpi_irq,
+ sxgbe_common_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto init_error;
+ }
+ }
+
+ /* Request TX DMA irq lines */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ ret = devm_request_irq(priv->device,
+ (priv->txq[queue_num])->irq_no,
+ sxgbe_tx_interrupt, 0,
+ dev->name, priv->txq[queue_num]);
+ if (unlikely(ret < 0)) {
+			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
+				   __func__, (priv->txq[queue_num])->irq_no, ret);
+ goto init_error;
+ }
+ }
+
+ /* Request RX DMA irq lines */
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ ret = devm_request_irq(priv->device,
+ (priv->rxq[queue_num])->irq_no,
+ sxgbe_rx_interrupt, 0,
+ dev->name, priv->rxq[queue_num]);
+ if (unlikely(ret < 0)) {
+			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
+				   __func__, (priv->rxq[queue_num])->irq_no, ret);
+ goto init_error;
+ }
+ }
+
+ /* Enable the MAC Rx/Tx */
+ priv->hw->mac->enable_tx(priv->ioaddr, true);
+ priv->hw->mac->enable_rx(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ sxgbe_mtl_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
+
+ priv->xstats.tx_threshold = priv->tx_tc;
+ priv->xstats.rx_threshold = priv->rx_tc;
+
+ /* Start the ball rolling... */
+ netdev_dbg(dev, "DMA RX/TX processes started...\n");
+ priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+ priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+	/* initialise TX coalesce parameters */
+ sxgbe_tx_init_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
+ }
+
+ priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
+ priv->eee_enabled = sxgbe_eee_init(priv);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+init_error:
+ free_dma_desc_resources(priv);
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+phy_error:
+ clk_disable_unprepare(priv->sxgbe_clk);
+
+ return ret;
+}
+
+/**
+ * sxgbe_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int sxgbe_release(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_tx_stop_all_queues(dev);
+
+ napi_disable(&priv->napi);
+
+ /* delete TX timers */
+ sxgbe_tx_del_timer(priv);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+ priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+ /* disable MTL queue */
+ sxgbe_disable_mtl_engine(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ priv->hw->mac->enable_tx(priv->ioaddr, false);
+ priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+ clk_disable_unprepare(priv->sxgbe_clk);
+
+ return 0;
+}
+
+/* Prepare first Tx descriptor for doing TSO operation */
+void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
+{
+ unsigned int total_hdr_len, tcp_hdr_len;
+
+ /* Write first Tx descriptor with appropriate value */
+ tcp_hdr_len = tcp_hdrlen(skb);
+ total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
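+	/* e.g. (illustrative) for a plain TCP/IPv4 frame without options this
+	 * is 14 + 20 + 20 = 54 bytes of headers placed in the first descriptor.
+	 */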
+
+ first_desc->tdes01 = dma_map_single(priv->device, skb->data,
+ total_hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, first_desc->tdes01))
+ pr_err("%s: TX dma mapping failed!!\n", __func__);
+
+ first_desc->tdes23.tx_rd_des23.first_desc = 1;
+ priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
+ tcp_hdr_len,
+ skb->len - total_hdr_len);
+}
+
+/**
+ * sxgbe_xmit: Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int entry, frag_num;
+ int cksum_flag = 0;
+ struct netdev_queue *dev_txq;
+ unsigned txq_index = skb_get_queue_mapping(skb);
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ unsigned int tx_rsize = priv->dma_tx_size;
+ struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
+ struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
+ struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int no_pagedlen = skb_headlen(skb);
+ int is_jumbo = 0;
+ u16 cur_mss = skb_shinfo(skb)->gso_size;
+ u32 ctxt_desc_req = 0;
+
+ /* get the TX queue handle */
+ dev_txq = netdev_get_tx_queue(dev, txq_index);
+
+ if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
+ ctxt_desc_req = 1;
+
+ if (unlikely(vlan_tx_tag_present(skb) ||
+ ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tqueue->hwts_tx_en)))
+ ctxt_desc_req = 1;
+
+ /* get the spinlock */
+ spin_lock(&tqueue->tx_lock);
+
+ if (priv->tx_path_in_lpi_mode)
+ sxgbe_disable_eee_mode(priv);
+
+ if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(dev_txq)) {
+ netif_tx_stop_queue(dev_txq);
+ netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
+ __func__, txq_index);
+ }
+ /* release the spin lock in case of BUSY */
+ spin_unlock(&tqueue->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tqueue->cur_tx % tx_rsize;
+ tx_desc = tqueue->dma_tx + entry;
+
+ first_desc = tx_desc;
+ if (ctxt_desc_req)
+ ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
+
+ /* save the skb address */
+ tqueue->tx_skbuff[entry] = skb;
+
+ if (!is_jumbo) {
+ if (likely(skb_is_gso(skb))) {
+ /* TSO support */
+ if (unlikely(tqueue->prev_mss != cur_mss)) {
+ priv->hw->desc->tx_ctxt_desc_set_mss(
+ ctxt_desc, cur_mss);
+ priv->hw->desc->tx_ctxt_desc_set_tcmssv(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_reset_ostc(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_set_ctxt(
+ ctxt_desc);
+ priv->hw->desc->tx_ctxt_desc_set_owner(
+ ctxt_desc);
+
+ entry = (++tqueue->cur_tx) % tx_rsize;
+ first_desc = tqueue->dma_tx + entry;
+
+ tqueue->prev_mss = cur_mss;
+ }
+ sxgbe_tso_prepare(priv, first_desc, skb);
+ } else {
+ tx_desc->tdes01 = dma_map_single(priv->device,
+ skb->data, no_pagedlen, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, tx_desc->tdes01))
+ netdev_err(dev, "%s: TX dma mapping failed!!\n",
+ __func__);
+
+ priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
+ no_pagedlen, cksum_flag);
+ }
+ }
+
+ for (frag_num = 0; frag_num < nr_frags; frag_num++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
+ int len = skb_frag_size(frag);
+
+ entry = (++tqueue->cur_tx) % tx_rsize;
+ tx_desc = tqueue->dma_tx + entry;
+ tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
+ tqueue->tx_skbuff[entry] = NULL;
+
+ /* prepare the descriptor */
+ priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
+ len, cksum_flag);
+ /* memory barrier to flush descriptor */
+ wmb();
+
+ /* set the owner */
+ priv->hw->desc->set_tx_owner(tx_desc);
+ }
+
+ /* close the descriptors */
+ priv->hw->desc->close_tx_desc(tx_desc);
+
+ /* memory barrier to flush descriptor */
+ wmb();
+
+ tqueue->tx_count_frames += nr_frags + 1;
+ if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
+ priv->hw->desc->clear_tx_ic(tx_desc);
+ priv->xstats.tx_reset_ic_bit++;
+ mod_timer(&tqueue->txtimer,
+ SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
+ } else {
+ tqueue->tx_count_frames = 0;
+ }
+
+ /* set owner for first desc */
+ priv->hw->desc->set_tx_owner(first_desc);
+
+ /* memory barrier to flush descriptor */
+ wmb();
+
+ tqueue->cur_tx++;
+
+ /* display current ring */
+ netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
+ __func__, tqueue->cur_tx % tx_rsize,
+ tqueue->dirty_tx % tx_rsize, entry,
+ first_desc, nr_frags);
+
+ if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(dev_txq);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tqueue->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->tx_enable_tstamp(first_desc);
+ }
+
+ if (!tqueue->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
+
+ spin_unlock(&tqueue->tx_lock);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * sxgbe_rx_refill: refill used skb preallocated buffers
+ * @priv: driver private structure
+ * Description : this is to reallocate the skb for the reception process
+ * that is based on zero-copy.
+ */
+static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+ u8 qnum = priv->cur_rx_qnum;
+
+ for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
+ priv->rxq[qnum]->dirty_rx++) {
+ unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
+ struct sxgbe_rx_norm_desc *p;
+
+ p = priv->rxq[qnum]->dma_rx + entry;
+
+ if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rxq[qnum]->rx_skbuff[entry] = skb;
+ priv->rxq[qnum]->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ p->rdes23.rx_rd_des23.buf2_addr =
+ priv->rxq[qnum]->rx_skbuff_dma[entry];
+ }
+
+ /* Added memory barrier for RX descriptor modification */
+ wmb();
+ priv->hw->desc->set_rx_owner(p);
+ /* Added memory barrier for RX descriptor modification */
+ wmb();
+ }
+}
+
+/**
+ * sxgbe_rx: receive the frames from the remote host
+ * @priv: driver private structure
+ * @limit: napi budget.
+ * Description : this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
+{
+ u8 qnum = priv->cur_rx_qnum;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->rxq[qnum]->cur_rx;
+ unsigned int next_entry = 0;
+ unsigned int count = 0;
+ int checksum;
+ int status;
+
+ while (count < limit) {
+ struct sxgbe_rx_norm_desc *p;
+ struct sk_buff *skb;
+ int frame_len;
+
+ p = priv->rxq[qnum]->dma_rx + entry;
+
+ if (priv->hw->desc->get_rx_owner(p))
+ break;
+
+ count++;
+
+ next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
+ prefetch(priv->rxq[qnum]->dma_rx + next_entry);
+
+ /* Read the status of the incoming frame and also get checksum
+ * value based on whether it is enabled in SXGBE hardware or
+ * not.
+ */
+ status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
+ &checksum);
+ if (unlikely(status < 0)) {
+ entry = next_entry;
+ continue;
+ }
+ if (unlikely(!priv->rxcsum_insertion))
+ checksum = CHECKSUM_NONE;
+
+ skb = priv->rxq[qnum]->rx_skbuff[entry];
+
+ if (unlikely(!skb))
+ netdev_err(priv->dev, "rx descriptor is not consistent\n");
+
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rxq[qnum]->rx_skbuff[entry] = NULL;
+
+ frame_len = priv->hw->desc->get_rx_frame_len(p);
+
+ skb_put(skb, frame_len);
+
+ skb->ip_summed = checksum;
+ if (checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+
+ entry = next_entry;
+ }
+
+ sxgbe_rx_refill(priv);
+
+ return count;
+}
+
+/**
+ * sxgbe_poll - sxgbe poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * To look at the incoming frames and clear the tx resources.
+ */
+static int sxgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct sxgbe_priv_data *priv = container_of(napi,
+ struct sxgbe_priv_data, napi);
+ int work_done = 0;
+ u8 qnum = priv->cur_rx_qnum;
+
+ priv->xstats.napi_poll++;
+ /* first, clean the tx queues */
+ sxgbe_tx_all_clean(priv);
+
+ work_done = sxgbe_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
+ }
+
+ return work_done;
+}
+
+/**
+ * sxgbe_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void sxgbe_tx_timeout(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ sxgbe_reset_all_tx_queues(priv);
+}
+
+/**
+ * sxgbe_common_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
+ * interrupts.
+ */
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = (struct net_device *)dev_id;
+ struct sxgbe_priv_data *priv = netdev_priv(netdev);
+ int status;
+
+ status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
+ /* For LPI we need to save the tx status */
+ if (status & TX_ENTRY_LPI_MODE) {
+ priv->xstats.tx_lpi_entry_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & TX_EXIT_LPI_MODE) {
+ priv->xstats.tx_lpi_exit_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & RX_ENTRY_LPI_MODE)
+ priv->xstats.rx_lpi_entry_n++;
+ if (status & RX_EXIT_LPI_MODE)
+ priv->xstats.rx_lpi_exit_n++;
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sxgbe_tx_interrupt - TX DMA ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the tx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
+{
+ int status;
+ struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
+ struct sxgbe_priv_data *priv = txq->priv_ptr;
+
+ /* get the channel status */
+ status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
+ &priv->xstats);
+ /* check for normal path */
+ if (likely((status & handle_tx)))
+ napi_schedule(&priv->napi);
+
+ /* check for unrecoverable error */
+ if (unlikely((status & tx_hard_error)))
+ sxgbe_restart_tx_queue(priv, txq->queue_no);
+
+ /* check for TC configuration change */
+ if (unlikely((status & tx_bump_tc) &&
+ (priv->tx_tc != SXGBE_MTL_SFMODE) &&
+ (priv->tx_tc < 512))) {
+ /* step of TX TC is 32 till 128, otherwise 64 */
+ priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
+ priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
+ txq->queue_no, priv->tx_tc);
+ priv->xstats.tx_threshold = priv->tx_tc;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sxgbe_rx_interrupt - RX DMA ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the rx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
+{
+ int status;
+ struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
+ struct sxgbe_priv_data *priv = rxq->priv_ptr;
+
+ /* get the channel status */
+ status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
+ &priv->xstats);
+
+ if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
+ priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
+ __napi_schedule(&priv->napi);
+ }
+
+ /* check for TC configuration change */
+ if (unlikely((status & rx_bump_tc) &&
+ (priv->rx_tc != SXGBE_MTL_SFMODE) &&
+ (priv->rx_tc < 128))) {
+ /* step of TC is 32 */
+ priv->rx_tc += 32;
+ priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
+ rxq->queue_no, priv->rx_tc);
+ priv->xstats.rx_threshold = priv->rx_tc;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
+{
+ u64 val = readl(ioaddr + reg_lo);
+
+ val |= ((u64)readl(ioaddr + reg_hi)) << 32;
+
+ return val;
+}
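
The helper above splices a 64-bit MMC counter out of two 32-bit reads, low word first. A standalone sketch of the same combination, with readl() replaced by a plain array and made-up register values, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake register pair standing in for the memory-mapped lo/hi counters. */
    static const uint32_t fake_regs[2] = { 0x89abcdefu, 0x01234567u };

    static uint64_t get_stat64(const uint32_t *regs, int lo, int hi)
    {
        uint64_t val = regs[lo];          /* low 32 bits first */

        val |= (uint64_t)regs[hi] << 32;  /* then the high 32 bits */
        return val;
    }

    int main(void)
    {
        /* Prints 0x0123456789abcdef for the fake values above. */
        printf("combined counter: 0x%016llx\n",
               (unsigned long long)get_stat64(fake_regs, 0, 1));
        return 0;
    }
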
+
+
+/* sxgbe_get_stats64 - entry point to see statistical information of device
+ * @dev : device pointer.
+ * @stats : pointer to hold all the statistical information of device.
+ * Description:
+ * This function is a driver entry point called whenever the device
+ * statistics are read (e.g. via ifconfig or ip). Statistics are the
+ * number of bytes sent or received, errors occurred, etc.
+ * Return value:
+ * This function returns various statistical information of device.
+ */
+static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->ioaddr;
+ u64 count;
+
+ spin_lock(&priv->stats_lock);
+ /* Freeze the counter registers before reading their values, otherwise
+ * they may be updated by hardware while we are reading them
+ */
+ writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
+
+ stats->rx_bytes = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXOCTETLO_GCNT_REG,
+ SXGBE_MMC_RXOCTETHI_GCNT_REG);
+
+ stats->rx_packets = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXFRAMELO_GBCNT_REG,
+ SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
+
+ stats->multicast = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXMULTILO_GCNT_REG,
+ SXGBE_MMC_RXMULTIHI_GCNT_REG);
+
+ stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXCRCERRLO_REG,
+ SXGBE_MMC_RXCRCERRHI_REG);
+
+ stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXLENERRLO_REG,
+ SXGBE_MMC_RXLENERRHI_REG);
+
+ stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
+ SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
+
+ stats->tx_bytes = sxgbe_get_stat64(ioaddr,
+ SXGBE_MMC_TXOCTETLO_GCNT_REG,
+ SXGBE_MMC_TXOCTETHI_GCNT_REG);
+
+ count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
+ SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
+
+ stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
+ SXGBE_MMC_TXFRAMEHI_GCNT_REG);
+ stats->tx_errors = count - stats->tx_errors;
+ stats->tx_packets = count;
+ stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
+ SXGBE_MMC_TXUFLWHI_GBCNT_REG);
+ writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+ spin_unlock(&priv->stats_lock);
+
+ return stats;
+}
+
+/* sxgbe_set_features - entry point to set offload features of the device.
+ * @dev : device pointer.
+ * @features : features which are required to be set.
+ * Description:
+ * This function is a driver entry point, called by the Linux kernel whenever
+ * any device features are set or reset by the user.
+ * Return value:
+ * This function returns 0 after setting or resetting device features.
+ */
+static int sxgbe_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ } else {
+ priv->hw->mac->disable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = false;
+ }
+ }
+
+ return 0;
+}
+
+/* sxgbe_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* RFC 791, page 25, "Every internet module must be able to forward
+ * a datagram of 68 octets without further fragmentation."
+ */
+ if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
+ netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
+ MIN_MTU, MAX_MTU);
+ return -EINVAL;
+ }
+
+ /* Return if the buffer sizes will not change */
+ if (dev->mtu == new_mtu)
+ return 0;
+
+ dev->mtu = new_mtu;
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* The receive ring buffer size has to be set based on the MTU. If the
+ * MTU is changed, the receive ring buffers must be reinitialised, so
+ * bring the interface down and back up.
+ */
+ sxgbe_release(dev);
+ return sxgbe_open(dev);
+}
+
+static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
+}
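
For reference, a standalone userspace sketch of how the six MAC address bytes are split across the high and low address words above (the address is hypothetical, and the SXGBE_HI_REG_AE bit that the driver ORs into the high word is left out because its value is defined elsewhere in the driver):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical MAC address 00:11:22:33:44:55 */
        const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned int hi, lo;

        /* High word carries the last two bytes: addr[5] above addr[4]. */
        hi = (addr[5] << 8) | addr[4];

        /* Low word carries the first four bytes, addr[3] in the MSB. */
        lo = ((unsigned int)addr[3] << 24) | (addr[2] << 16) |
             (addr[1] << 8) | addr[0];

        printf("ADDR_HIGH = 0x%08x, ADDR_LOW = 0x%08x\n", hi, lo);
        return 0;
    }
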
+
+/**
+ * sxgbe_set_rx_mode - entry point for setting the receive mode of
+ * a device (unicast, multicast, promiscuous)
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever a receive mode such as unicast, multicast or promiscuous
+ * must be enabled or disabled.
+ * Return value:
+ * void.
+ */
+static void sxgbe_set_rx_mode(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
+ unsigned int value = 0;
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ value = SXGBE_FRAME_FILTER_PR;
+
+ } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
+ writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
+
+ } else if (!netdev_mc_empty(dev)) {
+ /* Hash filter for multicast */
+ value = SXGBE_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
+ writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering) */
+ if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 addrs
+ * are required
+ */
+ value |= SXGBE_FRAME_FILTER_PR;
+ else {
+ netdev_for_each_uc_addr(ha, dev) {
+ sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= SXGBE_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + SXGBE_FRAME_FILTER);
+
+ netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+ readl(ioaddr + SXGBE_FRAME_FILTER),
+ readl(ioaddr + SXGBE_HASH_HIGH),
+ readl(ioaddr + SXGBE_HASH_LOW));
+}
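
The multicast hash indexing used above can be reproduced outside the kernel. The sketch below is standalone userspace C with local reimplementations of crc32_le() and bitrev32() (reflected CRC-32, polynomial 0xEDB88320); it only illustrates the bit arithmetic, and the sample multicast address is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-ins for the kernel helpers used by sxgbe_set_rx_mode(). */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
    }

    static uint32_t bitrev32(uint32_t x)
    {
        uint32_t r = 0;

        for (int i = 0; i < 32; i++)
            r |= ((x >> i) & 1u) << (31 - i);
        return r;
    }

    int main(void)
    {
        /* Hypothetical multicast address 01:00:5e:00:00:01 */
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t mc_filter[2] = { 0, 0 };

        /* Upper 6 bits of the bit-reversed CRC index the 64-bit table. */
        int bit_nr = bitrev32(~crc32_le(~0u, mc, 6)) >> 26;

        /* MSB picks the HIGH/LOW register, low 5 bits the bit in it. */
        mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);

        printf("bit %d -> HASH_%s, bit %d\n", bit_nr,
               (bit_nr >> 5) ? "HIGH" : "LOW", bit_nr & 31);
        return 0;
    }
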
+
+/**
+ * sxgbe_config - entry point for changing configuration mode passed on by
+ * ifconfig
+ * @dev : pointer to the device structure
+ * @map : pointer to the device mapping structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever some device configuration is changed.
+ * Return value:
+ * This function returns 0 on success and an appropriate error otherwise.
+ */
+static int sxgbe_config(struct net_device *dev, struct ifmap *map)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ /* Can't act on a running interface */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != (unsigned long)priv->ioaddr) {
+ netdev_warn(dev, "can't change I/O address\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow changing the IRQ */
+ if (map->irq != priv->irq) {
+ netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * sxgbe_poll_controller - entry point for polling receive by device
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ * Return value:
+ * Void.
+ */
+static void sxgbe_poll_controller(struct net_device *dev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+ disable_irq(priv->irq);
+ sxgbe_rx_interrupt(priv->irq, dev);
+ enable_irq(priv->irq);
+}
+#endif
+
+/* sxgbe_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it only supports phy_mii_ioctl(...).
+ */
+static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops sxgbe_netdev_ops = {
+ .ndo_open = sxgbe_open,
+ .ndo_start_xmit = sxgbe_xmit,
+ .ndo_stop = sxgbe_release,
+ .ndo_get_stats64 = sxgbe_get_stats64,
+ .ndo_change_mtu = sxgbe_change_mtu,
+ .ndo_set_features = sxgbe_set_features,
+ .ndo_set_rx_mode = sxgbe_set_rx_mode,
+ .ndo_tx_timeout = sxgbe_tx_timeout,
+ .ndo_do_ioctl = sxgbe_ioctl,
+ .ndo_set_config = sxgbe_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sxgbe_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Get the hardware ops */
+static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
+{
+ ops_ptr->mac = sxgbe_get_core_ops();
+ ops_ptr->desc = sxgbe_get_desc_ops();
+ ops_ptr->dma = sxgbe_get_dma_ops();
+ ops_ptr->mtl = sxgbe_get_mtl_ops();
+
+ /* set the MDIO communication Address/Data registers */
+ ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
+ ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
+
+ /* Assign the default link settings. SXGBE defines no default
+ * values to be set in registers, so assign 0 for port and duplex.
+ */
+ ops_ptr->link.port = 0;
+ ops_ptr->link.duplex = 0;
+ ops_ptr->link.speed = SXGBE_SPEED_10G;
+}
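
The per-block operation tables wired up here (mac, desc, dma, mtl) follow the usual const function-pointer-table pattern, so callers always go through priv->hw->...; a minimal standalone sketch of that pattern, with entirely hypothetical names:

    #include <stdio.h>

    /* Hypothetical miniature of the driver's ops-table indirection. */
    struct demo_mtl_ops {
        void (*enable_txqueue)(int queue_num);
    };

    static void demo_enable_txqueue(int queue_num)
    {
        printf("enable TX queue %d\n", queue_num);
    }

    static const struct demo_mtl_ops demo_ops = {
        .enable_txqueue = demo_enable_txqueue,
    };

    static const struct demo_mtl_ops *demo_get_mtl_ops(void)
    {
        return &demo_ops; /* one table per hardware block */
    }

    int main(void)
    {
        const struct demo_mtl_ops *ops = demo_get_mtl_ops();

        ops->enable_txqueue(0);
        return 0;
    }
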
+
+/**
+ * sxgbe_hw_init - Init the GMAC device
+ * @priv: driver private structure
+ * Description: this function checks the HW capability
+ * (if supported) and sets the driver's features.
+ */
+static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
+{
+ u32 ctrl_ids;
+
+ priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
+ if (!priv->hw)
+ return -ENOMEM;
+
+ /* get the hardware ops */
+ sxgbe_get_ops(priv->hw);
+
+ /* get the controller id */
+ ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
+ priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
+ priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
+ pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
+ priv->hw->ctrl_uid, priv->hw->ctrl_id);
+
+ /* get the H/W features */
+ if (!sxgbe_get_hw_features(priv))
+ pr_info("Hardware features not found\n");
+
+ if (priv->hw_cap.tx_csum_offload)
+ pr_info("TX Checksum offload supported\n");
+
+ if (priv->hw_cap.rx_csum_offload)
+ pr_info("RX Checksum offload supported\n");
+
+ return 0;
+}
+
+/**
+ * sxgbe_drv_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
+ * Description: this is the main probe function, used to
+ * call alloc_etherdev and allocate the private structure.
+ */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+ struct sxgbe_plat_data *plat_dat,
+ void __iomem *addr)
+{
+ struct sxgbe_priv_data *priv;
+ struct net_device *ndev;
+ int ret;
+ u8 queue_num;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
+ SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
+ if (!ndev)
+ return NULL;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ sxgbe_set_ethtool_ops(ndev);
+ priv->plat = plat_dat;
+ priv->ioaddr = addr;
+
+ /* Verify driver arguments */
+ sxgbe_verify_args();
+
+ /* Init MAC and get the capabilities */
+ ret = sxgbe_hw_init(priv);
+ if (ret)
+ goto error_free_netdev;
+
+ /* allocate memory resources for Descriptor rings */
+ ret = txring_mem_alloc(priv);
+ if (ret)
+ goto error_free_netdev;
+
+ ret = rxring_mem_alloc(priv);
+ if (ret)
+ goto error_free_netdev;
+
+ ndev->netdev_ops = &sxgbe_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GRO;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
+
+ /* assign filtering support */
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ /* Enable TCP segmentation offload for all DMA channels */
+ if (priv->hw_cap.tcpseg_offload) {
+ SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+ priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
+ }
+ }
+
+ /* Enable Rx checksum offload */
+ if (priv->hw_cap.rx_csum_offload) {
+ priv->hw->mac->enable_rx_csum(priv->ioaddr);
+ priv->rxcsum_insertion = true;
+ }
+
+ /* Initialise pause frame settings */
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ /* The RX watchdog is available; enable it depending on the platform data */
+ if (!priv->plat->riwt_off) {
+ priv->use_riwt = 1;
+ pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+
+ spin_lock_init(&priv->stats_lock);
+
+ priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
+ if (IS_ERR(priv->sxgbe_clk)) {
+ netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
+ __func__);
+ goto error_clk_get;
+ }
+
+ /* If a specific clk_csr value is passed from the platform, the CSR
+ * Clock Range selection cannot be changed at run-time and is fixed.
+ * Otherwise the driver will try to set the MDC clock dynamically
+ * according to the actual CSR clock input.
+ */
+ if (!priv->plat->clk_csr)
+ sxgbe_clk_csr_set(priv);
+ else
+ priv->clk_csr = priv->plat->clk_csr;
+
+ /* MDIO bus Registration */
+ ret = sxgbe_mdio_register(ndev);
+ if (ret < 0) {
+ netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+ goto error_netdev_register;
+ }
+
+ sxgbe_check_ether_addr(priv);
+
+ return priv;
+
+error_mdio_register:
+ clk_put(priv->sxgbe_clk);
+error_clk_get:
+error_netdev_register:
+ netif_napi_del(&priv->napi);
+error_free_netdev:
+ free_netdev(ndev);
+
+ return NULL;
+}
+
+/**
+ * sxgbe_drv_remove
+ * @ndev: net device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status and releases the DMA descriptor rings.
+ */
+int sxgbe_drv_remove(struct net_device *ndev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ netdev_info(ndev, "%s: removing driver\n", __func__);
+
+ priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+ priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+
+ priv->hw->mac->enable_tx(priv->ioaddr, false);
+ priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+ netif_napi_del(&priv->napi);
+
+ sxgbe_mdio_unregister(ndev);
+
+ unregister_netdev(ndev);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev)
+{
+ return 0;
+}
+
+int sxgbe_resume(struct net_device *ndev)
+{
+ return 0;
+}
+
+int sxgbe_freeze(struct net_device *ndev)
+{
+ return -ENOSYS;
+}
+
+int sxgbe_restore(struct net_device *ndev)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/* Driver is configured as Platform driver */
+static int __init sxgbe_init(void)
+{
+ int ret;
+
+ ret = sxgbe_register_platform();
+ if (ret)
+ goto err;
+ return 0;
+err:
+ pr_err("driver registration failed\n");
+ return ret;
+}
+
+static void __exit sxgbe_exit(void)
+{
+ sxgbe_unregister_platform();
+}
+
+module_init(sxgbe_init);
+module_exit(sxgbe_exit);
+
+#ifndef MODULE
+static int __init sxgbe_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "eee_timer:", 6)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+ return -EINVAL;
+}
+
+__setup("sxgbeeth=", sxgbe_cmdline_opt);
+#endif /* MODULE */
+
+MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
+
+MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
+MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..01af2cbb479d
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,244 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */
+#define SXGBE_SMA_PREAD_CMD 0x02 /* post-read increment address */
+#define SXGBE_SMA_READ_CMD 0x03 /* read command */
+#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
+#define SXGBE_MII_BUSY 0x00800000 /* mii busy */
+
+static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
+{
+ unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
+
+ while (!time_after(jiffies, fin_time)) {
+ if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
+ return 0;
+ cpu_relax();
+ }
+
+ return -EBUSY;
+}
+
+static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
+ u16 phydata)
+{
+ u32 reg = phydata;
+
+ reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
+ ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
+ writel(reg, sp->ioaddr + sp->hw->mii.data);
+}
+
+static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ u32 reg;
+
+ /* set mdio address register */
+ reg = ((phyreg >> 16) & 0x1f) << 21;
+ reg |= (phyaddr << 16) | (phyreg & 0xffff);
+ writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+ sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ u32 reg;
+
+ writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
+
+ /* set mdio address register */
+ reg = (phyaddr << 16) | (phyreg & 0x1f);
+ writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+ sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ const struct mii_regs *mii = &sp->hw->mii;
+ int rc;
+
+ rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+ if (rc < 0)
+ return rc;
+
+ if (phyreg & MII_ADDR_C45) {
+ sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
+ } else {
+ /* Ports 0-3 only support C22. */
+ if (phyaddr >= 4)
+ return -ENODEV;
+
+ sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+ }
+
+ return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
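
A standalone sketch of the address words built by sxgbe_mdio_c45() and sxgbe_mdio_c22() above. It assumes the Clause 45 register argument carries the MMD device address in bits 16-20, which is what the shift in sxgbe_mdio_c45() implies; the port and register numbers are made up:

    #include <stdio.h>

    static unsigned int c45_addr(int phyaddr, int phyreg)
    {
        unsigned int reg;

        /* Device address (bits 16-20 of phyreg) lands in bits 21-25,
         * the port address in bits 16-20, the register in bits 0-15.
         */
        reg = (((unsigned int)phyreg >> 16) & 0x1f) << 21;
        reg |= ((unsigned int)phyaddr << 16) | (phyreg & 0xffff);
        return reg;
    }

    static unsigned int c22_addr(int phyaddr, int phyreg)
    {
        /* Clause 22: 5-bit register number, port address in bits 16-20. */
        return ((unsigned int)phyaddr << 16) | (phyreg & 0x1f);
    }

    int main(void)
    {
        /* Hypothetical: port 1, C45 device 7, register 0x0001 */
        int c45_reg = (7 << 16) | 0x0001;

        printf("C45 address word: 0x%08x\n", c45_addr(1, c45_reg));
        printf("C22 address word: 0x%08x\n", c22_addr(1, 0x02));
        return 0;
    }
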
+
+/**
+ * sxgbe_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of the register within the PHY
+ * Description: this function is used for C45 and C22 MDIO read
+ */
+static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int rc;
+
+ rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+ if (rc < 0)
+ return rc;
+
+ return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of phy registers
+ * @phydata: data to be written into phy register
+ * Description: this function is used for C45 and C22 MDIO write
+ */
+static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+ phydata);
+}
+
+int sxgbe_mdio_register(struct net_device *ndev)
+{
+ struct mii_bus *mdio_bus;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
+ int err, phy_addr;
+ int *irqlist;
+ bool act;
+
+ /* allocate the new mdio bus */
+ mdio_bus = mdiobus_alloc();
+ if (!mdio_bus) {
+ netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (mdio_data->irqs)
+ irqlist = mdio_data->irqs;
+ else
+ irqlist = priv->mii_irq;
+
+ /* assign mii bus fields */
+ mdio_bus->name = "samsxgbe";
+ mdio_bus->read = &sxgbe_mdio_read;
+ mdio_bus->write = &sxgbe_mdio_write;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ mdio_bus->name, priv->plat->bus_id);
+ mdio_bus->priv = ndev;
+ mdio_bus->phy_mask = mdio_data->phy_mask;
+ mdio_bus->parent = priv->device;
+
+ /* register with kernel subsystem */
+ err = mdiobus_register(mdio_bus);
+ if (err != 0) {
+ netdev_err(ndev, "mdiobus register failed\n");
+ goto mdiobus_err;
+ }
+
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *phy = mdio_bus->phy_map[phy_addr];
+
+ if (phy) {
+ char irq_num[4];
+ char *irq_str;
+ /* If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if ((mdio_data->irqs == NULL) &&
+ (mdio_data->probed_phy_irq > 0)) {
+ irqlist[phy_addr] = mdio_data->probed_phy_irq;
+ phy->irq = mdio_data->probed_phy_irq;
+ }
+
+ /* If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = phy_addr;
+
+ act = (priv->plat->phy_addr == phy_addr);
+ switch (phy->irq) {
+ case PHY_POLL:
+ irq_str = "POLL";
+ break;
+ case PHY_IGNORE_INTERRUPT:
+ irq_str = "IGNORE";
+ break;
+ default:
+ sprintf(irq_num, "%d", phy->irq);
+ irq_str = irq_num;
+ break;
+ }
+ netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+ phy->phy_id, phy_addr, irq_str,
+ dev_name(&phy->dev), act ? " active" : "");
+ }
+ }
+
+ priv->mii = mdio_bus;
+
+ return 0;
+
+mdiobus_err:
+ mdiobus_free(mdio_bus);
+ return err;
+}
+
+int sxgbe_mdio_unregister(struct net_device *ndev)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ if (!priv->mii)
+ return 0;
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
+ unsigned int raa)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
+ reg_val &= ETS_RST;
+
+ /* ETS Algorithm */
+ switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
+ case ETS_WRR:
+ reg_val &= ETS_WRR;
+ break;
+ case ETS_WFQ:
+ reg_val |= ETS_WFQ;
+ break;
+ case ETS_DWRR:
+ reg_val |= ETS_DWRR;
+ break;
+ }
+ writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+
+ switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
+ case RAA_SP:
+ reg_val &= RAA_SP;
+ break;
+ case RAA_WSP:
+ reg_val |= RAA_WSP;
+ break;
+ }
+ writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+}
+
+/* For Dynamic DMA channel mapping for Rx queue */
+static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
+{
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
+ writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
+}
+
+static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
+ int queue_fifo)
+{
+ u32 fifo_bits, reg_val;
+
+ /* 0 means 256 bytes */
+ fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
+ int queue_fifo)
+{
+ u32 fifo_bits, reg_val;
+
+ /* 0 means 256 bytes */
+ fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
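
Both FIFO-size helpers encode the size in 256-byte units minus one ("0 means 256 bytes"). A standalone illustration of that encoding, with hypothetical FIFO sizes:

    #include <stdio.h>

    #define DEMO_FIFO_DIV 256 /* same granularity as SXGBE_MTL_*_FIFO_DIV */

    int main(void)
    {
        const int sizes[] = { 256, 2048, 4096, 32768 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            /* 0 encodes 256 bytes, 1 encodes 512 bytes, and so on. */
            int fifo_bits = sizes[i] / DEMO_FIFO_DIV - 1;

            printf("%5d bytes -> field value %d\n", sizes[i], fifo_bits);
        }
        return 0;
    }
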
+
+static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_ENABLE_QUEUE;
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
+ int threshold)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
+ reg_val |= (threshold << RX_FC_ACTIVE);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_ENABLE_FC;
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
+ int threshold)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
+ reg_val |= (threshold << RX_FC_DEACTIVE);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_RXQ_OP_FEP;
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val |= SXGBE_MTL_RXQ_OP_FUP;
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
+
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
+ int tx_mode)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+ /* TX specific MTL mode settings */
+ if (tx_mode == SXGBE_MTL_SFMODE) {
+ reg_val |= SXGBE_MTL_SFMODE;
+ } else {
+ /* set the TTC values */
+ if (tx_mode <= 64)
+ reg_val |= MTL_CONTROL_TTC_64;
+ else if (tx_mode <= 96)
+ reg_val |= MTL_CONTROL_TTC_96;
+ else if (tx_mode <= 128)
+ reg_val |= MTL_CONTROL_TTC_128;
+ else if (tx_mode <= 192)
+ reg_val |= MTL_CONTROL_TTC_192;
+ else if (tx_mode <= 256)
+ reg_val |= MTL_CONTROL_TTC_256;
+ else if (tx_mode <= 384)
+ reg_val |= MTL_CONTROL_TTC_384;
+ else
+ reg_val |= MTL_CONTROL_TTC_512;
+ }
+
+ /* write into TXQ operation register */
+ writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
+ int rx_mode)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+ /* RX specific MTL mode settings */
+ if (rx_mode == SXGBE_RX_MTL_SFMODE) {
+ reg_val |= SXGBE_RX_MTL_SFMODE;
+ } else {
+ if (rx_mode <= 64)
+ reg_val |= MTL_CONTROL_RTC_64;
+ else if (rx_mode <= 96)
+ reg_val |= MTL_CONTROL_RTC_96;
+ else if (rx_mode <= 128)
+ reg_val |= MTL_CONTROL_RTC_128;
+ }
+
+ /* write into RXQ operation register */
+ writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static const struct sxgbe_mtl_ops mtl_ops = {
+ .mtl_set_txfifosize = sxgbe_mtl_set_txfifosize,
+ .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize,
+ .mtl_enable_txqueue = sxgbe_mtl_enable_txqueue,
+ .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue,
+ .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue,
+ .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode,
+ .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode,
+ .mtl_init = sxgbe_mtl_init,
+ .mtl_fc_active = sxgbe_mtl_fc_active,
+ .mtl_fc_deactive = sxgbe_mtl_fc_deactive,
+ .mtl_fc_enable = sxgbe_mtl_fc_enable,
+ .mtl_fep_enable = sxgbe_mtl_fep_enable,
+ .mtl_fep_disable = sxgbe_mtl_fep_disable,
+ .mtl_fup_enable = sxgbe_mtl_fup_enable,
+ .mtl_fup_disable = sxgbe_mtl_fup_disable
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
+{
+ return &mtl_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644
index 000000000000..7e4810c4137e
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
@@ -0,0 +1,104 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_MTL_H__
+#define __SXGBE_MTL_H__
+
+#define SXGBE_MTL_OPMODE_ESTMASK 0x3
+#define SXGBE_MTL_OPMODE_RAAMASK 0x1
+#define SXGBE_MTL_FCMASK 0x7
+#define SXGBE_MTL_TX_FIFO_DIV 256
+#define SXGBE_MTL_RX_FIFO_DIV 256
+
+#define SXGBE_MTL_RXQ_OP_FEP BIT(4)
+#define SXGBE_MTL_RXQ_OP_FUP BIT(3)
+#define SXGBE_MTL_ENABLE_FC 0x80
+
+#define ETS_WRR 0xFFFFFF9F
+#define ETS_RST 0xFFFFFF9F
+#define ETS_WFQ 0x00000020
+#define ETS_DWRR 0x00000040
+#define RAA_SP 0xFFFFFFFB
+#define RAA_WSP 0x00000004
+
+#define RX_QUEUE_DYNAMIC 0x80808080
+#define RX_FC_ACTIVE 8
+#define RX_FC_DEACTIVE 13
+
+enum ttc_control {
+ MTL_CONTROL_TTC_64 = 0x00000000,
+ MTL_CONTROL_TTC_96 = 0x00000020,
+ MTL_CONTROL_TTC_128 = 0x00000030,
+ MTL_CONTROL_TTC_192 = 0x00000040,
+ MTL_CONTROL_TTC_256 = 0x00000050,
+ MTL_CONTROL_TTC_384 = 0x00000060,
+ MTL_CONTROL_TTC_512 = 0x00000070,
+};
+
+enum rtc_control {
+ MTL_CONTROL_RTC_64 = 0x00000000,
+ MTL_CONTROL_RTC_96 = 0x00000002,
+ MTL_CONTROL_RTC_128 = 0x00000003,
+};
+
+enum flow_control_th {
+ MTL_FC_FULL_1K = 0x00000000,
+ MTL_FC_FULL_2K = 0x00000001,
+ MTL_FC_FULL_4K = 0x00000002,
+ MTL_FC_FULL_5K = 0x00000003,
+ MTL_FC_FULL_6K = 0x00000004,
+ MTL_FC_FULL_8K = 0x00000005,
+ MTL_FC_FULL_16K = 0x00000006,
+ MTL_FC_FULL_24K = 0x00000007,
+};
+
+struct sxgbe_mtl_ops {
+ void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
+ unsigned int raa);
+
+ void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
+ int mtl_fifo);
+
+ void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
+ int queue_fifo);
+
+ void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+ void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+ int tx_mode);
+
+ void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+ int rx_mode);
+
+ void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);
+
+ void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
+ int threshold);
+
+ void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
+ int threshold);
+
+ void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);
+
+ void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+#endif /* __SXGBE_MTL_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644
index 000000000000..b147d469a799
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -0,0 +1,259 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#ifdef CONFIG_OF
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+ struct sxgbe_plat_data *plat,
+ const char **mac)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sxgbe_dma_cfg *dma_cfg;
+
+ if (!np)
+ return -ENODEV;
+
+ *mac = of_get_mac_address(np);
+ plat->interface = of_get_phy_mode(np);
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+ plat->bus_id = 0;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
+ if (!dma_cfg)
+ return -ENOMEM;
+
+ plat->dma_cfg = dma_cfg;
+ of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
+ if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
+ dma_cfg->fixed_burst = true;
+
+ return 0;
+}
+#else
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+ struct sxgbe_plat_data *plat,
+ const char **mac)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+/**
+ * sxgbe_platform_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to init
+ * the net device, register the MDIO bus, etc.
+ */
+static int sxgbe_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+ int i, chan;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ struct sxgbe_priv_data *priv = NULL;
+ struct sxgbe_plat_data *plat_dat = NULL;
+ const char *mac = NULL;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct device_node *node = dev->of_node;
+
+ /* Get memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto err_out;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ if (pdev->dev.of_node) {
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct sxgbe_plat_data),
+ GFP_KERNEL);
+ if (!plat_dat)
+ return -ENOMEM;
+
+ ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
+ if (ret) {
+ pr_err("%s: main dt probe failed\n", __func__);
+ return ret;
+ }
+ }
+
+ priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
+ if (!priv) {
+ pr_err("%s: main driver probe failed\n", __func__);
+ goto err_out;
+ }
+
+ /* Get MAC address if available (DT); priv is valid only after probe */
+ if (mac)
+ ether_addr_copy(priv->dev->dev_addr, mac);
+
+ /* Get the SXGBE common INT information */
+ priv->irq = irq_of_parse_and_map(node, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "sxgbe common irq parsing failed\n");
+ goto err_drv_remove;
+ }
+
+ /* Get the TX/RX IRQ numbers */
+ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
+ priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+ if (priv->txq[i]->irq_no <= 0) {
+ dev_err(dev, "sxgbe tx irq parsing failed\n");
+ goto err_tx_irq_unmap;
+ }
+ }
+
+ for (i = 0; i < SXGBE_RX_QUEUES; i++) {
+ priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+ if (priv->rxq[i]->irq_no <= 0) {
+ dev_err(dev, "sxgbe rx irq parsing failed\n");
+ goto err_rx_irq_unmap;
+ }
+ }
+
+ priv->lpi_irq = irq_of_parse_and_map(node, chan);
+ if (priv->lpi_irq <= 0) {
+ dev_err(dev, "sxgbe lpi irq parsing failed\n");
+ goto err_rx_irq_unmap;
+ }
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ pr_debug("platform driver registration completed\n");
+
+ return 0;
+
+err_rx_irq_unmap:
+ while (i--)
+ irq_dispose_mapping(priv->rxq[i]->irq_no);
+ i = SXGBE_TX_QUEUES;
+err_tx_irq_unmap:
+ while (i--)
+ irq_dispose_mapping(priv->txq[i]->irq_no);
+ irq_dispose_mapping(priv->irq);
+err_drv_remove:
+ sxgbe_drv_remove(ndev);
+err_out:
+ return -ENODEV;
+}
+
+/**
+ * sxgbe_platform_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove to free the net
+ * resources and then releases the platform resources (e.g. memory).
+ */
+static int sxgbe_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ int ret = sxgbe_drv_remove(ndev);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int sxgbe_platform_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_suspend(ndev);
+}
+
+static int sxgbe_platform_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_resume(ndev);
+}
+
+static int sxgbe_platform_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_freeze(ndev);
+}
+
+static int sxgbe_platform_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return sxgbe_restore(ndev);
+}
+
+static const struct dev_pm_ops sxgbe_platform_pm_ops = {
+ .suspend = sxgbe_platform_suspend,
+ .resume = sxgbe_platform_resume,
+ .freeze = sxgbe_platform_freeze,
+ .thaw = sxgbe_platform_restore,
+ .restore = sxgbe_platform_restore,
+};
+#else
+static const struct dev_pm_ops sxgbe_platform_pm_ops;
+#endif /* CONFIG_PM */
+
+static const struct of_device_id sxgbe_dt_ids[] = {
+ { .compatible = "samsung,sxgbe-v2.0a"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
+
+static struct platform_driver sxgbe_platform_driver = {
+ .probe = sxgbe_platform_probe,
+ .remove = sxgbe_platform_remove,
+ .driver = {
+ .name = SXGBE_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &sxgbe_platform_pm_ops,
+ .of_match_table = of_match_ptr(sxgbe_dt_ids),
+ },
+};
+
+int sxgbe_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&sxgbe_platform_driver);
+ if (err)
+ pr_err("failed to register the platform driver\n");
+
+ return err;
+}
+
+void sxgbe_unregister_platform(void)
+{
+ platform_driver_unregister(&sxgbe_platform_driver);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644
index 000000000000..5a89acb4c505
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -0,0 +1,488 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_REGMAP_H__
+#define __SXGBE_REGMAP_H__
+
+/* SXGBE MAC Registers */
+#define SXGBE_CORE_TX_CONFIG_REG 0x0000
+#define SXGBE_CORE_RX_CONFIG_REG 0x0004
+#define SXGBE_CORE_PKT_FILTER_REG 0x0008
+#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
+#define SXGBE_CORE_HASH_TABLE_REG0 0x0010
+#define SXGBE_CORE_HASH_TABLE_REG1 0x0014
+#define SXGBE_CORE_HASH_TABLE_REG2 0x0018
+#define SXGBE_CORE_HASH_TABLE_REG3 0x001C
+#define SXGBE_CORE_HASH_TABLE_REG4 0x0020
+#define SXGBE_CORE_HASH_TABLE_REG5 0x0024
+#define SXGBE_CORE_HASH_TABLE_REG6 0x0028
+#define SXGBE_CORE_HASH_TABLE_REG7 0x002C
+
+/* EEE-LPI Registers */
+#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0
+#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4
+
+/* VLAN Specific Registers */
+#define SXGBE_CORE_VLAN_TAG_REG 0x0050
+#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058
+#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060
+#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064
+#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
+
+/* Flow Control Registers */
+#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070
+#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074
+#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078
+#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C
+#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080
+#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084
+#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088
+#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C
+#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090
+#define SXGBE_CORE_RX_CTL0_REG 0x00A0
+#define SXGBE_CORE_RX_CTL1_REG 0x00A4
+#define SXGBE_CORE_RX_CTL2_REG 0x00A8
+#define SXGBE_CORE_RX_CTL3_REG 0x00AC
+
+/* Interrupt Registers */
+#define SXGBE_CORE_INT_STATUS_REG 0x00B0
+#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
+#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
+#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0
+#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4
+#define SXGBE_CORE_VERSION_REG 0x0110
+#define SXGBE_CORE_DEBUG_REG 0x0114
+#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4)
+
+/* SMA(MDIO) module registers */
+#define SXGBE_MDIO_SCMD_ADD_REG 0x0200
+#define SXGBE_MDIO_SCMD_DATA_REG 0x0204
+#define SXGBE_MDIO_CCMD_WADD_REG 0x0208
+#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C
+#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210
+#define SXGBE_MDIO_INT_STATUS_REG 0x0214
+#define SXGBE_MDIO_INT_ENABLE_REG 0x0218
+#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C
+#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220
+
+/* port specific, addr = 0-3 */
+#define SXGBE_MDIO_DEV_BASE_REG 0x0230
+#define SXGBE_MDIO_PORT_DEV_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
+#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
+#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \
+ (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
+
+#define SXGBE_CORE_GPIO_CTL_REG 0x0278
+#define SXGBE_CORE_GPIO_STATUS_REG 0x027C
+
+/* Address registers for filtering */
+#define SXGBE_CORE_ADD_BASE_REG 0x0300
+
+/* addr = 0-31 */
+#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \
+ (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
+#define SXGBE_CORE_ADD_LOWOFFSET(addr) \
+ (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
+
+/* SXGBE MMC registers */
+#define SXGBE_MMC_CTL_REG 0x0800
+#define SXGBE_MMC_RXINT_STATUS_REG 0x0804
+#define SXGBE_MMC_TXINT_STATUS_REG 0x0808
+#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C
+#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810
+
+/* TX specific counters */
+#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814
+#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818
+#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C
+#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820
+#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824
+#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828
+#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C
+#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830
+#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834
+#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838
+#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C
+#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840
+#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844
+#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848
+#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C
+#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850
+#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854
+#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858
+#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C
+#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860
+#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864
+#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868
+#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C
+#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870
+#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874
+#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878
+#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C
+#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880
+#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884
+#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888
+#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C
+#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890
+#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894
+#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898
+#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C
+#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0
+
+/* RX specific counters */
+#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900
+#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904
+#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908
+#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C
+#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910
+#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914
+#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918
+#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C
+#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920
+#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924
+#define SXGBE_MMC_RXCRCERRLO_REG 0x0928
+#define SXGBE_MMC_RXCRCERRHI_REG 0x092C
+#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930
+#define SXGBE_MMC_RXJABBERERR_REG 0x0934
+#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938
+#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C
+#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940
+#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944
+#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948
+#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C
+#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950
+#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954
+#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958
+#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C
+#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960
+#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964
+#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968
+#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C
+#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970
+#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974
+#define SXGBE_MMC_RXLENERRLO_REG 0x0978
+#define SXGBE_MMC_RXLENERRHI_REG 0x097C
+#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980
+#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984
+#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988
+#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C
+#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990
+#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994
+#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998
+#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C
+#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0
+
+/* L3/L4 function registers */
+#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
+#define SXGBE_CORE_L34_DATA_REG 0x0C04
+
+/* ARP registers */
+#define SXGBE_CORE_ARP_ADD_REG 0x0C10
+
+/* RSS registers */
+#define SXGBE_CORE_RSS_CTL_REG 0x0C80
+#define SXGBE_CORE_RSS_ADD_REG 0x0C88
+#define SXGBE_CORE_RSS_DATA_REG 0x0C8C
+
+/* RSS control register bits */
+#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3)
+#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2)
+#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1)
+#define SXGBE_CORE_RSS_CTL_RSSE BIT(0)
+
+/* IEEE 1588 registers */
+#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00
+#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04
+#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C
+#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10
+#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14
+#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18
+#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C
+#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20
+#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
+#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34
+
+/* Auxiliary registers */
+#define SXGBE_CORE_AUX_CTL_REG 0x0D40
+#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48
+#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
+
+/* PPS registers */
+#define SXGBE_CORE_PPS_CTL_REG 0x0D70
+#define SXGBE_CORE_PPS_BASE 0x0D80
+
+/* addr = 0 - 3 */
+#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
+#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
+#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
+#define SXGBE_CORE_PPS_WIDTH_REG(addr) \
+ (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
+#define SXGBE_CORE_PTO_CTL_REG 0x0DC0
+#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4
+#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8
+#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC
+#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0
+
+/* SXGBE MTL Registers */
+#define SXGBE_MTL_BASE_REG 0x1000
+#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000)
+#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008)
+#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C)
+#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010)
+#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020)
+#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030)
+#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034)
+#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038)
+#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040)
+#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044)
+
+/* TC/Queue registers, qnum=0-15 */
+#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100)
+#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_SFMODE BIT(1)
+#define SXGBE_MTL_FIFO_LSHIFT 16
+#define SXGBE_MTL_ENABLE_QUEUE 0x00000008
+#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
+#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
+#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \
+ (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
+
+#define SXGBE_MTL_TC_RXBASE_REG 0x1140
+#define SXGBE_RX_MTL_SFMODE BIT(5)
+#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_RXQ_CTL_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
+#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
+#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \
+ (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
+
+/* SXGBE DMA Registers */
+#define SXGBE_DMA_BASE_REG 0x3000
+#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000)
+#define SXGBE_DMA_SOFT_RESET BIT(0)
+#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004)
+#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0)
+#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11)
+#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008)
+#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010)
+#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018)
+#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020)
+#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024)
+#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028)
+#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C)
+#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030)
+#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034)
+
+/* Channel Registers, cha_num = 0-15 */
+#define SXGBE_DMA_CHA_BASE_REG \
+ (SXGBE_DMA_BASE_REG + 0x0100)
+#define SXGBE_DMA_CHA_CTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
+#define SXGBE_DMA_PBL_X8MODE BIT(16)
+#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12)
+#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
+#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
+#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
+#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
+#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
+#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
+#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
+#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
+#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
+#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
+#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
+#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
+#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
+#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
+#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \
+ (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
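
All of the per-channel DMA macros above share the same base-plus-0x80-per-channel stride. A quick standalone check of how the offsets expand (two of the macros reproduced, with parentheses added around the argument):

    #include <stdio.h>

    #define DEMO_DMA_BASE_REG     0x3000
    #define DEMO_DMA_CHA_BASE_REG (DEMO_DMA_BASE_REG + 0x0100)
    /* 0x80 bytes of register space per DMA channel, as above. */
    #define DEMO_DMA_CHA_CTL_REG(cha_num) \
        (DEMO_DMA_CHA_BASE_REG + ((cha_num) * 0x80) + 0x00)
    #define DEMO_DMA_CHA_TXCTL_REG(cha_num) \
        (DEMO_DMA_CHA_BASE_REG + ((cha_num) * 0x80) + 0x04)

    int main(void)
    {
        for (int ch = 0; ch < 4; ch++)
            printf("channel %d: CTL=0x%04x TXCTL=0x%04x\n", ch,
                   (unsigned int)DEMO_DMA_CHA_CTL_REG(ch),
                   (unsigned int)DEMO_DMA_CHA_TXCTL_REG(ch));
        return 0;
    }
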
+
+/* TX DMA control register specific */
+#define SXGBE_TX_START_DMA BIT(0)
+
+/* sxgbe tx configuration register bitfields */
+#define SXGBE_SPEED_10G 0x0
+#define SXGBE_SPEED_2_5G 0x1
+#define SXGBE_SPEED_1G 0x2
+#define SXGBE_SPEED_LSHIFT 29
+
+#define SXGBE_TX_ENABLE BIT(0)
+#define SXGBE_TX_DISDIC_ALGO BIT(1)
+#define SXGBE_TX_JABBER_DISABLE BIT(16)
+
+/* sxgbe rx configuration register bitfields */
+#define SXGBE_RX_ENABLE BIT(0)
+#define SXGBE_RX_ACS_ENABLE BIT(1)
+#define SXGBE_RX_WATCHDOG_DISABLE BIT(7)
+#define SXGBE_RX_JUMBPKT_ENABLE BIT(8)
+#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9)
+#define SXGBE_RX_LOOPBACK_ENABLE BIT(10)
+#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31)
+
+/* sxgbe vlan Tag Register bitfields */
+#define SXGBE_VLAN_SVLAN_ENABLE BIT(18)
+#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26)
+#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27)
+
+/* XMAC VLAN Tag Inclusion Register (0x0060) bitfields.
+ * The fields below are the same for the Inner VLAN Tag
+ * Inclusion Register (0x0064).
+ */
+enum vlan_tag_ctl_tx {
+ VLAN_TAG_TX_NOP,
+ VLAN_TAG_TX_DEL,
+ VLAN_TAG_TX_INSERT,
+ VLAN_TAG_TX_REPLACE
+};
+#define SXGBE_VLAN_PRTY_CTL BIT(18)
+#define SXGBE_VLAN_CSVL_CTL BIT(19)
+
+/* SXGBE TX Q Flow Control Register bitfields */
+#define SXGBE_TX_FLOW_CTL_FCB BIT(0)
+#define SXGBE_TX_FLOW_CTL_TFB BIT(1)
+
+/* SXGBE RX Q Flow Control Register bitfields */
+#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0)
+#define SXGBE_RX_UNICAST_DETECT BIT(1)
+#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8)
+
+/* sxgbe rx Q control0 register bitfields */
+#define SXGBE_RX_Q_ENABLE 0x2
+
+/* SXGBE hardware features bitfield specific */
+/* Capability Register 0 */
+#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1)
+#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4)
+#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5)
+#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6)
+#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7)
+#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8)
+#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9)
+#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12)
+#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14)
+#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18)
+#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25)
+#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27)
+
+/* Capability Register 1 */
+#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F))
+#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6)
+#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17)
+#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18)
+#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19)
+#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20)
+#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24)
+#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27)
+
+/* Capability Register 2 */
+#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F))
+#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6)
+#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12)
+#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18)
+#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24)
+#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28)
+
+/* DMA channel interrupt enable specific */
+/* DMA Normal interrupt */
+#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */
+#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */
+#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */
+
+#define SXGBE_DMA_INT_NORMAL \
+ (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \
+ SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
+
+/* DMA Abnormal interrupt */
+#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */
+#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */
+#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */
+#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */
+#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
+
+#define SXGBE_DMA_INT_ABNORMAL \
+ (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \
+ SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \
+ SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
+
+#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
+
+/* DMA channel interrupt status specific */
+#define SXGBE_DMA_INT_STATUS_REB2 BIT(21)
+#define SXGBE_DMA_INT_STATUS_REB1 BIT(20)
+#define SXGBE_DMA_INT_STATUS_REB0 BIT(19)
+#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18)
+#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17)
+#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16)
+#define SXGBE_DMA_INT_STATUS_NIS BIT(15)
+#define SXGBE_DMA_INT_STATUS_AIS BIT(14)
+#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13)
+#define SXGBE_DMA_INT_STATUS_FBE BIT(12)
+#define SXGBE_DMA_INT_STATUS_RPS BIT(8)
+#define SXGBE_DMA_INT_STATUS_RBU BIT(7)
+#define SXGBE_DMA_INT_STATUS_RI BIT(6)
+#define SXGBE_DMA_INT_STATUS_TBU BIT(2)
+#define SXGBE_DMA_INT_STATUS_TPS BIT(1)
+#define SXGBE_DMA_INT_STATUS_TI BIT(0)
+
+#endif /* __SXGBE_REGMAP_H__ */
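A minimal stand-alone sketch (not part of the patch; user-space and purely illustrative) of the address arithmetic the per-queue and per-channel macros above encode: each MTL queue and each DMA channel owns a 0x80-byte register window above its base, so a macro such as SXGBE_DMA_CHA_CTL_REG(n) resolves to base + n * 0x80 + a fixed per-register offset.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout used by the SXGBE register map above. */
#define SXGBE_DMA_BASE_REG		0x3000u
#define SXGBE_DMA_CHA_BASE_REG		(SXGBE_DMA_BASE_REG + 0x0100u)
#define SXGBE_DMA_CHA_CTL_REG(ch)	(SXGBE_DMA_CHA_BASE_REG + ((ch) * 0x80u) + 0x00u)
#define SXGBE_DMA_CHA_STATUS_REG(ch)	(SXGBE_DMA_CHA_BASE_REG + ((ch) * 0x80u) + 0x60u)

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 4; ch++)
		printf("channel %u: ctl 0x%04x, status 0x%04x\n",
		       ch, SXGBE_DMA_CHA_CTL_REG(ch), SXGBE_DMA_CHA_STATUS_REG(ch));
	return 0;
}

For channel 1, for example, this yields 0x3180 for the control register, matching the 0x80 stride between channel register blocks.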
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include "sxgbe_common.h"
+#include "sxgbe_xpcs.h"
+
+static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
+{
+ u32 value;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ value = readl(priv->ioaddr + XPCS_OFFSET + reg);
+
+ return value;
+}
+
+static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
+{
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ writel(data, priv->ioaddr + XPCS_OFFSET + reg);
+
+ return 0;
+}
+
+int sxgbe_xpcs_init(struct net_device *ndev)
+{
+ u32 value;
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ /* 10G XAUI mode */
+ sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+ return 0;
+}
+
+int sxgbe_xpcs_init_1G(struct net_device *ndev)
+{
+ int value;
+
+ /* 10GBASE-X PCS (1G) mode */
+ sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+ sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
+
+ value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+ do {
+ value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+ } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+ value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+ sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+ /* Auto Negotiation clause 37 enable */
+ value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+ sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Byungho An <bh74.an@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_XPCS_H__
+#define __SXGBE_XPCS_H__
+
+/* XPCS Registers */
+#define XPCS_OFFSET 0x1A060000
+#define SR_PCS_MMD_CONTROL1 0x030000
+#define SR_PCS_CONTROL2 0x030007
+#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004
+#define VR_PCS_MMD_DIGITAL_STATUS 0x038010
+#define SR_MII_MMD_CONTROL 0x1F0000
+#define SR_MII_MMD_AN_ADV 0x1F0004
+#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005
+#define VR_MII_MMD_AN_CONTROL 0x1F8001
+#define VR_MII_MMD_AN_INT_STATUS 0x1F8002
+
+#define XPCS_QSEQ_STATE_STABLE 0x10
+#define XPCS_QSEQ_STATE_MPLLOFF 0x1c
+#define XPCS_TYPE_SEL_R 0x00
+#define XPCS_TYPE_SEL_X 0x01
+#define XPCS_TYPE_SEL_W 0x02
+#define XPCS_XAUI_MODE 0x00
+#define XPCS_RXAUI_MODE 0x01
+
+int sxgbe_xpcs_init(struct net_device *ndev);
+int sxgbe_xpcs_init_1G(struct net_device *ndev);
+
+#endif /* __SXGBE_XPCS_H__ */
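A stand-alone sketch of the sequence sxgbe_xpcs_init() above appears to follow: assert the reset bit in the PCS control register, poll the vendor status register until it leaves the stable state, de-assert the bit, then poll until stable is reported again. The xpcs_write_ctrl()/xpcs_read_status() helpers and the toy register model are hypothetical, not the driver's MMIO accessors.

#include <stdint.h>
#include <stdio.h>

#define CTRL_RESET	(1u << 11)	/* BIT(11) in SR_PCS_MMD_CONTROL1 */
#define QSEQ_MASK	0x1cu		/* XPCS_QSEQ_STATE_MPLLOFF */
#define QSEQ_STABLE	0x10u		/* XPCS_QSEQ_STATE_STABLE */

static uint32_t ctrl_reg;
static uint32_t status_reg = QSEQ_STABLE;

static uint32_t xpcs_read_status(void) { return status_reg; }

static void xpcs_write_ctrl(uint32_t val)
{
	ctrl_reg = val;
	/* toy model: the status tracks the reset bit immediately */
	status_reg = (val & CTRL_RESET) ? 0x00u : QSEQ_STABLE;
}

int main(void)
{
	xpcs_write_ctrl(ctrl_reg | CTRL_RESET);
	while ((xpcs_read_status() & QSEQ_MASK) == QSEQ_STABLE)
		;	/* wait for the PCS to leave the stable state */

	xpcs_write_ctrl(ctrl_reg & ~CTRL_RESET);
	while ((xpcs_read_status() & QSEQ_MASK) != QSEQ_STABLE)
		;	/* wait for it to report stable again */

	printf("PCS stable again\n");
	return 0;
}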
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..21c20ea0dad0 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
return -EIO;
- memcpy(mac_address,
- MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
return 0;
}
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However we need
- * multiple TX queues per channel.
+ /* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
*/
efx->max_channels =
min_t(unsigned int,
@@ -565,10 +565,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* several of each (in fact that's the only option if host
* page size is >4K). So we may allocate some extra VIs just
* for writing PIO buffers through.
+ *
+ * The UC mapping contains (min_vis - 1) complete VIs and the
+ * first half of the next VI. Then the WC mapping begins with
+ * the second half of this last VI.
*/
uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
+ /* pio_write_vi_base rounds down to give the number of complete
+ * VIs inside the UC mapping.
+ */
pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
nic_data->n_piobufs) *
@@ -1955,6 +1962,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
int tx_descs = 0;
int spent = 0;
+ if (quota <= 0)
+ return spent;
+
read_ptr = channel->eventq_read_ptr;
for (;;) {
@@ -3145,12 +3155,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
table->dev_uc_count = -1;
} else {
table->dev_uc_count = 1 + netdev_uc_count(net_dev);
- memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
- memcpy(table->dev_uc_list[i].addr,
- uc->addr, ETH_ALEN);
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++;
}
}
@@ -3162,8 +3170,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
eth_broadcast_addr(table->dev_mc_list[0].addr);
i = 1;
netdev_for_each_mc_addr(mc, net_dev) {
- memcpy(table->dev_mc_list[i].addr,
- mc->addr, ETH_ALEN);
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
i++;
}
}
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a1e3de..62a55dde61d5 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
-/* RX_USER_DESC */
-#define ESF_DZ_RX_USR_RESERVED_LBN 62
-#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
-
/* TX_CSUM_TSTAMP_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_USER_DESC */
-#define ESF_DZ_TX_USR_TYPE_LBN 63
-#define ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define ESF_DZ_TX_USR_CONT_LBN 62
-#define ESF_DZ_TX_USR_CONT_WIDTH 1
-#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d464347021..57b971e5e6b2 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
- channel->n_rx_frm_trunc = 0;
-
return 0;
fail:
@@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
+ ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
return 0;
}
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
- if (rc > 0) {
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
- rc = pci_enable_msix(efx->pci_dev, xentries,
- n_channels);
}
- if (rc == 0) {
+ if (rc > 0) {
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
- } else {
- /* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
}
}
@@ -1603,6 +1599,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
+ efx_set_channels(efx);
+
rc = efx->type->dimension_resources(efx);
if (rc)
goto fail2;
@@ -1613,7 +1611,6 @@ static int efx_probe_nic(struct efx_nic *efx)
efx->rx_indir_table[i] =
ethtool_rxfh_indir_default(i, efx->rss_spread);
- efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -2115,7 +2112,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
- char *new_addr = addr->sa_data;
+ u8 *new_addr = addr->sa_data;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2124,7 +2121,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
- memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+ ether_addr_copy(net_dev->dev_addr, new_addr);
efx_sriov_mac_address_changed(efx);
/* Reconfigure the MAC */
@@ -3273,6 +3270,6 @@ module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
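The efx_probe_interrupts() hunk above replaces the old pci_enable_msix() call-and-retry loop with a single pci_enable_msix_range() call. A hedged sketch of the general pattern (the driver context and my_setup_vectors() name are hypothetical): the helper either allocates between the minimum and maximum vector count and returns how many it granted, or returns a negative error, so partial success no longer needs a retry.

#include <linux/pci.h>

static int my_setup_vectors(struct pci_dev *pdev, struct msix_entry *entries,
			    int wanted)
{
	int i, rc;

	for (i = 0; i < wanted; i++)
		entries[i].entry = i;

	/* Ask for up to "wanted" vectors, accept as few as one. */
	rc = pci_enable_msix_range(pdev, entries, 1, wanted);
	if (rc < 0)
		return rc;	/* caller falls back to MSI or legacy interrupts */

	if (rc < wanted)
		dev_warn(&pdev->dev, "only %d of %d MSI-X vectors available\n",
			 rc, wanted);

	return rc;		/* number of vectors actually enabled */
}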
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78fe01c..99032581336f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
#include "net_driver.h"
#include "filter.h"
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 229428915aa8..0de8b07c24c2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
- int already_up;
+ bool already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail;
if (efx->state != STATE_READY) {
- rc = -EIO;
- goto fail1;
+ rc = -EBUSY;
+ goto out;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail1;
+ goto out;
}
}
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-fail1:
- /* Fill ethtool results structures */
+out:
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
}
/* MAC address mask including only I/G bit */
-static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
@@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
rule->flow_type = ETHER_FLOW;
if (spec.match_flags &
(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
- memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ eth_broadcast_addr(mac_mask->h_dest);
else
- memcpy(mac_mask->h_dest, mac_addr_ig_mask,
- ETH_ALEN);
+ ether_addr_copy(mac_mask->h_dest,
+ mac_addr_ig_mask);
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
- memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
- memset(mac_mask->h_source, ~0, ETH_ALEN);
+ ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+ eth_broadcast_addr(mac_mask->h_source);
}
if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
mac_entry->h_proto = spec.ether_type;
@@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
return -EINVAL;
- memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
+ ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
}
if (!is_zero_ether_addr(mac_mask->h_source)) {
if (!is_broadcast_ether_addr(mac_mask->h_source))
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
- memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ ether_addr_copy(spec.rem_mac, mac_entry->h_source);
}
if (mac_mask->h_proto) {
if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f761f4d0..8ec20b713cc6 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
-
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
+
/**************************************************************************
*
* RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
case 100: link_speed = 1; break;
default: link_speed = 0; break;
}
+
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
@@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
}
/* Read the MAC addresses */
- memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
+ ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
efx->phy_type, efx->mdio.prtad);
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
-
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a105ca..a08761360cdf 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
-
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
@@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
int tx_packets = 0;
int spent = 0;
+ if (budget <= 0)
+ return spent;
+
read_ptr = channel->eventq_read_ptr;
for (;;) {
@@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
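The budget <= 0 early returns added to efx_ef10_ev_process(), efx_farch_ev_process() and the tile poll routines later in this series all guard the same case: the poll path may now be invoked with a zero budget (for example from netpoll), and in that case it must not process any receive work. A minimal sketch of the convention, with a hypothetical poll routine:

#include <linux/netdevice.h>

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int spent = 0;

	/* A zero budget means "do no RX work at all" (e.g. netpoll). */
	if (budget <= 0)
		return spent;

	/* ... process up to "budget" packets here, counting them in spent ... */

	if (spent < budget)
		napi_complete(napi);

	return spent;
}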
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3ef298d3c47e..d0ed7f71ea7e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
}
if (addr != NULL) {
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
- memcpy(spec->loc_mac, addr, ETH_ALEN);
+ ether_addr_copy(spec->loc_mac, addr);
}
return 0;
}
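Many of the sfc hunks in this series replace memcpy(dst, src, ETH_ALEN) with ether_addr_copy(), which is why new __aligned(2) annotations and BUILD_BUG_ON checks appear alongside them: ether_addr_copy() is allowed to copy the six bytes as 16- or 32-bit accesses, so both arguments must be at least two-byte aligned. A small illustrative sketch (names are hypothetical, not from the patch):

#include <linux/etherdevice.h>

/* Static MAC constants handed to ether_addr_copy() need 2-byte alignment. */
static const u8 example_mac[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x00, 0x00, 0x01
};

/* dst is assumed to be at least 2-byte aligned as well. */
static void example_copy(struct net_device *ndev, u8 *dst)
{
	ether_addr_copy(dst, example_mac);	/* replaces memcpy(dst, example_mac, ETH_ALEN) */
	ether_addr_copy(dst, ndev->dev_addr);	/* dev_addr buffers are suitably aligned */
}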
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index eb59abb57e85..7bd4b14bf3b3 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
int rc;
BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+ /* we need __aligned(2) for ether_addr_copy */
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
}
if (mac_address)
- memcpy(mac_address,
- port_num ?
- MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
- MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
- ETH_ALEN);
+ ether_addr_copy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
if (fw_subtype_list) {
for (i = 0;
i < MCDI_VAR_ARRAY_LEN(outlen,
@@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
MC_CMD_FILTER_MODE_SIMPLE);
- memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+ ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 91d23252f8fa..e5fc4e1574b5 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
- memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
- efx->net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c59a903..8a400a0595eb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
-
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..32d969e857f7 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
-
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index d7a36829649a..6b861e3de4b0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
- bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
return -EAGAIN;
}
- /* Convert the NIC time into kernel time. No correction is required-
- * this time is the output of a firmware process.
- */
- mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
- ptp->timeset[last_good].minor, 0);
-
- /* Calculate delay from actual PPS to last_time */
- delta = ktime_to_timespec(mc_time);
- delta.tv_nsec +=
- last_time->ts_real.tv_nsec -
- (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-
- /* It is possible that the seconds rolled over between taking
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
* the start reading and the last value written by the host. The
* timescales are such that a gap of more than one second is never
- * expected.
+ * expected. delta is *not* normalised.
*/
start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
- if (start_sec != last_sec) {
- if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
- netif_warn(efx, hw, efx->net_dev,
- "PTP bad synchronisation seconds\n");
- return -EAGAIN;
- } else {
- delta.tv_sec = 1;
- }
- } else {
- delta.tv_sec = 0;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
}
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+ /* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
pps_sub_ts(&ptp->host_time_pps, delta);
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
- ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
@@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 1,
.adjfreq = efx_phc_adjfreq,
.adjtime = efx_phc_adjtime,
@@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
- ptp->evt_overflow = false;
/* Get the NIC PTP attributes and set up time conversions */
rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *match_data_012, *match_data_345;
unsigned int version;
+ u8 *data;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
@@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
return false;
}
- version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
return false;
}
@@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
- match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
- match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
}
- version = skb->data[PTP_V2_VERSION_OFFSET];
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
@@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
- match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
} else {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
/* Does this packet require timestamping? */
- if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
match->state = PTP_PACKET_STATE_UNMATCHED;
/* We expect the sequence number to be in the same position in
@@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
(match_data_345[0] << 24));
match->words[1] = (match_data_345[1] |
(match_data_345[2] << 8) |
- (skb->data[PTP_V1_SEQUENCE_OFFSET +
- PTP_V1_SEQUENCE_LENGTH - 1] <<
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else if (!ptp->evt_overflow) {
- /* Log a warning message and set the event overflow flag.
- * The message won't be logged again until the event queue
- * becomes empty.
- */
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited warning message. */
netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
- ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
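The efx_ptp_process_times() rework above folds the old if/else rollover handling into a range check plus delta.tv_sec = (last_sec - start_sec) & 1. A stand-alone sketch (the mask value is illustrative only) showing why the arithmetic covers both permitted cases, same second and a single rollover, including wraparound of the masked seconds counter:

#include <stdint.h>
#include <stdio.h>

#define SECOND_MASK 0xffffffffu		/* illustrative stand-in for MC_SECOND_MASK */

static unsigned int delta_sec(uint32_t start_sec, uint32_t last_sec)
{
	return (last_sec - start_sec) & 1;	/* 0 if same second, 1 if rolled over */
}

int main(void)
{
	printf("same second     -> %u\n", delta_sec(41, 41));		/* 0 */
	printf("rolled over     -> %u\n", delta_sec(41, 42));		/* 1 */
	printf("wrapped at mask -> %u\n", delta_sec(SECOND_MASK, 0));	/* 1 */
	return 0;
}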
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 26641817a9c7..0fc5baef45b1 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -50,7 +50,7 @@ struct efx_loopback_payload {
} __packed;
/* Loopback test source MAC address */
-static const unsigned char payload_source[ETH_ALEN] = {
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
@@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx)
struct efx_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
- memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
- memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 0c38f926871e..9a9205e77896 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
/* Fill the remaining addresses */
list_for_each_entry(local_addr, &efx->local_addr_list, link) {
- memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
+ ether_addr_copy(peer->mac_addr, local_addr->addr);
peer->tci = 0;
++peer;
++peer_count;
@@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx)
goto fail_vfs;
rtnl_lock();
- memcpy(vfdi_status->peers[0].mac_addr,
- net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
efx->vf_init_count = efx->vf_count;
rtnl_unlock();
@@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx)
if (!efx->vf_init_count)
return;
- memcpy(vfdi_status->peers[0].mac_addr,
- efx->net_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(vfdi_status->peers[0].mac_addr,
+ efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &efx->peer_work);
}
@@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
vf = efx->vf + vf_i;
mutex_lock(&vf->status_lock);
- memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->addr.mac_addr, mac);
__efx_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
@@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
vf = efx->vf + vf_i;
ivi->vf = vf_i;
- memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
+ ether_addr_copy(ivi->mac, vf->addr.mac_addr);
ivi->tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..fa9475300411 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* Requires TX checksum offload support.
*/
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
- if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+ if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
unsigned index =
(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct efx_buffer *page_buf =
&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
unsigned offset =
- TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
} else {
tx_queue->tso_long_headers++;
- buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+ buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
if (unlikely(!buffer->heap_buf))
return NULL;
- result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+ result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
static int tso_start(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
- bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
st->out_len = skb->len - header_len;
- if (!use_options) {
+ if (!use_opt_desc) {
st->header_unmap_len = 0;
if (likely(in_len == 0)) {
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 5eb933c97bba..7daa7d433099 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -987,7 +987,7 @@ out_unlock:
spin_unlock(&priv->lock);
out:
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
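The dev_kfree_skb() conversions scattered through the silan, sis, smsc, stmmac, sungem and tile hunks follow one convention: dev_consume_skb_any() when the packet was handled successfully, dev_kfree_skb_any() when it is being dropped, and the _any variants because these paths may run in hard-IRQ as well as process context. A short hedged sketch with a hypothetical transmit handler:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (skb->len > ndev->mtu + ETH_HLEN) {
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);		/* dropped: shows up as a drop */
		return NETDEV_TX_OK;
	}

	/* ... hand the packet to the hardware here ... */

	ndev->stats.tx_packets++;
	dev_consume_skb_any(skb);		/* consumed: a normal, successful free */
	return NETDEV_TX_OK;
}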
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ff57a46388ee..6072f093e6b4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
skb->data, skb->len, PCI_DMA_TODEVICE);
if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
sis_priv->tx_ring[entry].bufptr))) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
sis_priv->tx_skbuff[entry] = NULL;
net_dev->stats.tx_dropped++;
spin_unlock_irqrestore(&sis_priv->lock, flags);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index c50fb08c9905..66b05e62f70a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&lp->lock, flags);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 839c0e6cca01..d1b4dca53a9d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
done: if (!THROTTLE_TX_PKTS)
netif_wake_queue(dev);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
/*
@@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_warn(dev, "Far too big packet error.\n");
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6382b7c416f4..a0fc151da40d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -439,7 +439,8 @@ static int smsc911x_request_resources(struct platform_device *pdev)
/* Request clock */
pdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk))
- netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk));
+ dev_dbg(&pdev->dev, "couldn't get clock %li\n",
+ PTR_ERR(pdata->clk));
return ret;
}
@@ -1672,7 +1673,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
skb_tx_timestamp(skb);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -2379,8 +2380,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
int res_size, irq_flags;
int retval;
- pr_info("Driver version %s\n", SMSC_DRV_VERSION);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f2d7c702c77f..2d09c116cbc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -26,6 +26,16 @@ config STMMAC_PLATFORM
If unsure, say N.
+config DWMAC_SOCFPGA
+ bool "SOCFPGA dwmac support"
+ depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on Altera SOCFPGA SoCs.
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This glue driver is used on
+ the Arria5 and Cyclone5 FPGA SoCs.
+
config DWMAC_SUNXI
bool "Allwinner GMAC support"
depends on STMMAC_PLATFORM && ARCH_SUNXI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index dcef28775dad..18695ebef7e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -3,6 +3,7 @@ stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
new file mode 100644
index 000000000000..fd8a217556a1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -0,0 +1,130 @@
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Adapted from dwmac-sti.c
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
+
+struct socfpga_dwmac {
+ int interface;
+ u32 reg_offset;
+ u32 reg_shift;
+ struct device *dev;
+ struct regmap *sys_mgr_base_addr;
+};
+
+static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *sys_mgr_base_addr;
+ u32 reg_offset, reg_shift;
+ int ret;
+
+ dwmac->interface = of_get_phy_mode(np);
+
+ sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ if (IS_ERR(sys_mgr_base_addr)) {
+ dev_info(dev, "No sysmgr-syscon node found\n");
+ return PTR_ERR(sys_mgr_base_addr);
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
+ if (ret) {
+ dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
+ if (ret) {
+ dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ dwmac->reg_offset = reg_offset;
+ dwmac->reg_shift = reg_shift;
+ dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
+ dwmac->dev = dev;
+
+ return 0;
+}
+
+static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = dwmac->interface;
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val;
+
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ break;
+ default:
+ dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
+ return -EINVAL;
+ }
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+ ctrl |= val << reg_shift;
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+ return 0;
+}
+
+static void *socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = socfpga_dwmac_setup(dwmac);
+ if (ret) {
+ dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data socfpga_gmac_data = {
+ .setup = socfpga_dwmac_probe,
+};
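socfpga_dwmac_setup() above updates the PHYSEL field with an explicit regmap_read(), mask, or, regmap_write() sequence. For illustration only (not a suggested change to the patch; the helper name is hypothetical), the same read-modify-write can be expressed with regmap_update_bits():

#include <linux/regmap.h>
#include <linux/types.h>

/* Same value as in dwmac-socfpga.c above. */
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK	0x00000003

static int example_set_physel(struct regmap *sys_mgr, u32 reg_offset,
			      u32 reg_shift, u32 physel)
{
	u32 mask = SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift;

	/* Read-modify-write of just the PHYSEL bits for this EMAC. */
	return regmap_update_bits(sys_mgr, reg_offset, mask, physel << reg_shift);
}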
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f9e60d7918c4..ca01035634a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -136,6 +136,9 @@ extern const struct stmmac_of_data sun7i_gmac_data;
#ifdef CONFIG_DWMAC_STI
extern const struct stmmac_of_data sti_gmac_data;
#endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+extern const struct stmmac_of_data socfpga_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8543e1cfd55e..d940034acdd4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
priv->tx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8fb32a80f1c1..46aef5108bea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -38,6 +38,9 @@ static const struct of_device_id stmmac_dt_ids[] = {
{ .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
{ .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
#endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 7680581ebe12..b7ad3565566c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = stmmac_adjust_freq,
.adjtime = stmmac_adjust_time,
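The repeated one-line .n_pins = 0 hunks (sfc ptp, stmmac, cpts, tilegx) exist because struct ptp_clock_info gained an n_pins field alongside the new programmable-pin support in the PTP core; clocks that expose no pins now state that explicitly. A minimal hedged sketch of such an initializer (the clock itself is hypothetical):

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static struct ptp_clock_info example_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "example clock",
	.max_adj	= 1000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,	/* no programmable pins on this clock */
	.pps		= 0,
	/* .adjfreq/.adjtime/.gettime/.settime/.enable callbacks go here */
};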
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e1f260..79606f47a08e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
struct msix_entry msi_vec[NIU_NUM_LDG];
struct niu_parent *parent = np->parent;
struct pci_dev *pdev = np->pdev;
- int i, num_irqs, err;
+ int i, num_irqs;
u8 first_ldg;
first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
(np->port == 0 ? 3 : 1));
BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
-retry:
for (i = 0; i < num_irqs; i++) {
msi_vec[i].vector = 0;
msi_vec[i].entry = i;
}
- err = pci_enable_msix(pdev, msi_vec, num_irqs);
- if (err < 0) {
+ num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+ if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
return;
}
- if (err > 0) {
- num_irqs = err;
- goto retry;
- }
np->flags |= NIU_FLAGS_MSIX;
for (i = 0; i < num_irqs; i++)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index c2799dc46325..102a66fc54a2 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
}
dev->stats.tx_packets++;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
gp->tx_old = entry;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d6d8ec676c8..5d5fec6c4eb0 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -378,7 +378,6 @@ struct cpsw_priv {
u32 version;
u32 coal_intvl;
u32 bus_freq_mhz;
- struct net_device_stats stats;
int rx_packet_max;
int host_port;
struct clk *clk;
@@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status)
if (unlikely(netif_queue_stopped(ndev)))
netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
}
@@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
- priv->stats.rx_bytes += len;
- priv->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
} else {
- priv->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
new_skb = skb;
}
@@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
fail:
- priv->stats.tx_dropped++;
+ ndev->stats.tx_dropped++;
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
@@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(req);
int slave_no = cpsw_slave_index(priv);
if (!netif_running(dev))
@@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
#endif
- case SIOCGMIIPHY:
- data->phy_id = priv->slaves[slave_no].phy->addr;
- break;
- default:
- return -ENOTSUPP;
}
- return 0;
+ if (!priv->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
- priv->stats.tx_errors++;
+ ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
@@ -1544,12 +1539,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- return &priv->stats;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
@@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
- .ndo_get_stats = cpsw_ndo_get_stats,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
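The cpsw change works because the core already falls back to the counters embedded in struct net_device when a driver provides neither .ndo_get_stats nor .ndo_get_stats64, so the private stats copy and its accessor were redundant. A minimal sketch of the resulting idiom (function name is illustrative):

    #include <linux/netdevice.h>

    /* With no ndo_get_stats()/ndo_get_stats64() hook, dev_get_stats()
     * reads ndev->stats directly, so updating it in the fast path is
     * all a simple driver needs to do.
     */
    static void example_rx_accounting(struct net_device *ndev,
                                      unsigned int len)
    {
            ndev->stats.rx_packets++;
            ndev->stats.rx_bytes += len;
    }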
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 8c351f100aca..a3bbf59eaafd 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,10 +31,6 @@
#ifdef CONFIG_TI_CPTS
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
#define cpts_read32(c, r) __raw_readl(&c->reg->r)
#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
@@ -217,6 +213,7 @@ static struct ptp_clock_info cpts_info = {
.name = "CTPS timer",
.max_adj = 1000000,
.n_ext_ts = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = cpts_ptp_adjfreq,
.adjtime = cpts_ptp_adjtime,
@@ -300,7 +297,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
u64 ns = 0;
struct cpts_event *event;
struct list_head *this, *next;
- unsigned int class = sk_run_filter(skb, ptp_filter);
+ unsigned int class = ptp_classify_raw(skb);
unsigned long flags;
u16 seqid;
u8 mtype;
@@ -371,10 +368,6 @@ int cpts_register(struct device *dev, struct cpts *cpts,
int err, i;
unsigned long flags;
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- pr_err("cpts: bad ptp filter\n");
- return -EINVAL;
- }
cpts->info = cpts_info;
cpts->clock = ptp_clock_register(&cpts->info, dev);
if (IS_ERR(cpts->clock)) {
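cpts (and ixp4xx_eth further down) previously carried a private copy of the PTP BPF filter and ran it through sk_run_filter(); both now use the shared classifier selected via NET_PTP_CLASSIFY instead. A minimal sketch of the helper, with an illustrative function name:

    #include <linux/ptp_classify.h>
    #include <linux/skbuff.h>

    /* ptp_classify_raw() returns a PTP_CLASS_* value such as
     * PTP_CLASS_V1_IPV4, or PTP_CLASS_NONE for non-PTP traffic; no
     * per-driver filter initialisation is required any more.
     */
    static bool example_is_ptp_event(struct sk_buff *skb)
    {
            return ptp_classify_raw(skb) != PTP_CLASS_NONE;
    }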
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 17503da9f7a5..7e1c91d41a87 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
struct info_mpipe *info_mpipe =
container_of(napi, struct info_mpipe, napi);
+ if (budget <= 0)
+ goto done;
+
instance = info_mpipe->instance;
while ((n = gxio_mpipe_iqueue_try_peek(
&info_mpipe->iqueue,
@@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = {
.name = "mPIPE clock",
.max_adj = 999999999,
.n_ext_ts = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_mpipe_adjfreq,
.adjtime = ptp_mpipe_adjtime,
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index edb2e12a0fe2..e5a5c5d4ce0c 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
unsigned int work = 0;
+ if (budget <= 0)
+ goto done;
+
while (priv->active) {
int index = qup->__packet_receive_read;
if (index == qsp->__packet_receive_queue.__packet_write)
@@ -1821,7 +1824,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
+ dev_consume_skb_any(olds[i]);
/* Update stats. */
u64_stats_update_begin(&stats->syncp);
@@ -2005,7 +2008,7 @@ busy:
/* Handle completions. */
for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
+ dev_consume_skb_any(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
u64_stats_update_begin(&stats->syncp);
@@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
cpu_stats = &priv->cpu[i]->stats;
do {
- start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
trx_packets = cpu_stats->rx_packets;
ttx_packets = cpu_stats->tx_packets;
trx_bytes = cpu_stats->rx_bytes;
ttx_bytes = cpu_stats->tx_bytes;
trx_errors = cpu_stats->rx_errors;
trx_dropped = cpu_stats->rx_dropped;
- } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
rx_packets += trx_packets;
tx_packets += ttx_packets;
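The tile and via-rhine hunks reflect the replacement of the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair with the _irq variants; the reader-side retry loop itself is unchanged. A sketch of that loop under illustrative struct and function names:

    #include <linux/u64_stats_sync.h>

    struct example_stats {
            u64 rx_packets;
            u64 rx_bytes;
            struct u64_stats_sync syncp;
    };

    /* Retry until a consistent snapshot of the 64-bit counters is read;
     * on 64-bit hosts the seqcount compiles away entirely.
     */
    static void example_read_stats(const struct example_stats *s,
                                   u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start    = u64_stats_fetch_begin_irq(&s->syncp);
                    *packets = s->rx_packets;
                    *bytes   = s->rx_bytes;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }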
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3f4a32e39d27..0282d0161859 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
if (skb) {
pci_unmap_single(card->pdev, buf_addr, skb->len,
PCI_DMA_TODEVICE);
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
}
}
return 0;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 88e9c73cebc0..fef5573dbfca 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
int received = 0, handled;
u32 status;
+ if (budget <= 0)
+ return received;
+
spin_lock(&lp->rx_lock);
status = tc_readl(&tr->Int_Src);
do {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 6ac20a6738f4..f61dc2b72bb2 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &rhine_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops,
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
@@ -1678,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -1698,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
pci_map_single(rp->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -1836,7 +1836,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb(rp->tx_skbuff[entry]);
+ dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -2072,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
netdev_stats_to_stats64(stats, &dev->stats);
do {
- start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
stats->rx_packets = rp->rx_stats.packets;
stats->rx_bytes = rp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+ start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
stats->tx_packets = rp->tx_stats.packets;
stats->tx_bytes = rp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
return stats;
}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ad61d26a44f3..de08e86db209 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* The hardware can handle at most 7 memory segments, so merge
* the skb if there are more */
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0df36c6ec7f4..104d46f37969 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 71c27b3292f1..1f33c4c86c20 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
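Both wiznet probes now rely on devm_ioremap_resource(), which folds the request_mem_region() and ioremap() steps, plus their error reporting, into one devres-managed call that returns an ERR_PTR on failure. A minimal sketch of the idiom (function name is illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static void __iomem *example_map_mmio(struct platform_device *pdev)
    {
            struct resource *res;

            /* devm_ioremap_resource() also copes with a NULL resource,
             * returning ERR_PTR(-EINVAL), so no separate check is needed.
             */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            return devm_ioremap_resource(&pdev->dev, res);
    }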
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a4347508031c..fa193c4688da 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev)
/* if we're doing rx csum offload, set it up */
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
- (skb->protocol == __constant_htons(ETH_P_IP)) &&
- (skb->len > 64)) {
+ (skb->protocol == htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
skb->csum = cur_p->app3 & 0xFFFF;
skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4bfdf8c7ada0..7b0a73556264 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == __constant_htons(ETH_P_IP) &&
+ skb->protocol == htons(ETH_P_IP) &&
skb->len > 64) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 36052b98b3fc..0d87c67a5ff7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -795,18 +795,6 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
}
/**
- * xemaclite_mdio_reset - Reset the mdio bus.
- * @bus: Pointer to the MII bus
- *
- * This function is required(?) as per Documentation/networking/phy.txt.
- * There is no reset in this device; this function always returns 0.
- */
-static int xemaclite_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
-/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
* @ofdev: Pointer to OF device structure
@@ -861,7 +849,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->name = "Xilinx Emaclite MDIO";
bus->read = xemaclite_mdio_read;
bus->write = xemaclite_mdio_write;
- bus->reset = xemaclite_mdio_reset;
bus->parent = dev;
bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
@@ -1037,7 +1024,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
skb_tx_timestamp(new_skb);
dev->stats.tx_bytes += len;
- dev_kfree_skb(new_skb);
+ dev_consume_skb_any(new_skb);
return 0;
}
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 3f431019e615..b81bc9fca378 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -23,6 +23,7 @@ config IXP4XX_ETH
tristate "Intel IXP4xx Ethernet support"
depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
select PHYLIB
+ select NET_PTP_CLASSIFY
---help---
Say Y here if you want to use built-in Ethernet ports
on IXP4xx processor.
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 25283f17d82f..f7e0f0f7c2e2 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -256,10 +256,6 @@ static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
@@ -267,7 +263,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
u16 *hi, *id;
u32 lo;
- if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
+ if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
return 0;
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -1413,11 +1409,6 @@ static int eth_init_one(struct platform_device *pdev)
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
- pr_err("ixp4xx_eth: bad ptp filter\n");
- return -EINVAL;
- }
-
if (!(dev = alloc_etherdev(sizeof(struct port))))
return -ENOMEM;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 61dd2447e1bb..81901659cc9e 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1184,7 +1184,7 @@ static void __exit yam_cleanup_driver(void)
struct yam_mcs *p;
int i;
- del_timer(&yam_timer);
+ del_timer_sync(&yam_timer);
for (i = 0; i < NR_PORTS; i++) {
struct net_device *dev = yam_devs[i];
if (dev) {
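The yam change is a small correctness fix rather than an API conversion: on SMP, del_timer() only removes a pending timer, while del_timer_sync() additionally waits for a handler that is already running on another CPU, which is what a module-exit path needs before tearing the devices down. A sketch with an illustrative timer:

    #include <linux/timer.h>

    static struct timer_list example_timer;

    static void example_teardown(void)
    {
            /* After this returns, the handler is guaranteed not to be
             * running anywhere, so its resources can safely be freed.
             */
            del_timer_sync(&example_timer);
    }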
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce3f21d..13010b4dae5b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -30,6 +30,7 @@
/* Fwd declaration */
struct hv_netvsc_packet;
+struct ndis_tcp_ip_checksum_info;
/* Represent the xfer page packet which contains 1 or more netvsc packet */
struct xferpage_packet {
@@ -73,7 +74,7 @@ struct hv_netvsc_packet {
} completion;
/* This points to the memory after page_buf */
- void *extension;
+ struct rndis_message *rndis_msg;
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
@@ -117,7 +118,8 @@ int netvsc_send(struct hv_device *device,
void netvsc_linkstatus_callback(struct hv_device *device_obj,
unsigned int status);
int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet);
+ struct hv_netvsc_packet *packet,
+ struct ndis_tcp_ip_checksum_info *csum_info);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -126,11 +128,6 @@ void rndis_filter_device_remove(struct hv_device *dev);
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
-
-
-int rndis_filter_send(struct hv_device *dev,
- struct hv_netvsc_packet *pkt);
-
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
@@ -139,6 +136,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_PROTOCOL_VERSION_1 2
#define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +192,23 @@ enum {
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ /* Version 4 messages */
+ NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+ NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+ NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ /* Version 5 messages */
+ NVSP_MSG5_TYPE_OID_QUERY_EX,
+ NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+ NVSP_MSG5_TYPE_SUBCHANNEL,
+ NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
};
enum {
@@ -447,10 +463,44 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+enum nvsp_subchannel_operation {
+ NVSP_SUBCHANNEL_NONE = 0,
+ NVSP_SUBCHANNEL_ALLOCATE,
+ NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+ u32 op;
+ u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+ u32 status;
+ u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+	/* The offset of the send indirection table from the top of this struct.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+ u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+ struct nvsp_5_subchannel_request subchn_req;
+ struct nvsp_5_subchannel_complete subchn_comp;
+ struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_5_message_uber v5_msg;
} __packed;
/* ALL Messages */
@@ -463,6 +513,7 @@ struct nvsp_message {
#define NETVSC_MTU 65536
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -506,6 +557,8 @@ struct netvsc_device {
/* Holds rndis device info */
void *extension;
+	/* The receive buffer for this device */
+ unsigned char cb_buffer[NETVSC_PACKET_SIZE];
};
/* NdisInitialize message */
@@ -671,9 +724,133 @@ struct ndis_pkt_8021q_info {
};
};
+struct ndis_oject_header {
+ u8 type;
+ u8 revision;
+ u16 size;
+};
+
+#define NDIS_OBJECT_TYPE_DEFAULT 0x80
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
+
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE 1
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4 0
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6 1
+
+/*
+ * New offload OIDs for NDIS 6
+ */
+#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C /* set only */
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
+#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+
+struct ndis_offload_params {
+ struct ndis_oject_header header;
+ u8 ip_v4_csum;
+ u8 tcp_ip_v4_csum;
+ u8 udp_ip_v4_csum;
+ u8 tcp_ip_v6_csum;
+ u8 udp_ip_v6_csum;
+ u8 lso_v1;
+ u8 ip_sec_v1;
+ u8 lso_v2_ipv4;
+ u8 lso_v2_ipv6;
+ u8 tcp_connection_ip_v4;
+ u8 tcp_connection_ip_v6;
+ u32 flags;
+ u8 ip_sec_v2;
+ u8 ip_sec_v2_ip_v4;
+ struct {
+ u8 rsc_ip_v4;
+ u8 rsc_ip_v6;
+ };
+ struct {
+ u8 encapsulated_packet_task_offload;
+ u8 encapsulation_types;
+ };
+};
+
+struct ndis_tcp_ip_checksum_info {
+ union {
+ struct {
+ u32 is_ipv4:1;
+ u32 is_ipv6:1;
+ u32 tcp_checksum:1;
+ u32 udp_checksum:1;
+ u32 ip_header_checksum:1;
+ u32 reserved:11;
+ u32 tcp_header_offset:10;
+ } transmit;
+ struct {
+ u32 tcp_checksum_failed:1;
+ u32 udp_checksum_failed:1;
+ u32 ip_checksum_failed:1;
+ u32 tcp_checksum_succeeded:1;
+ u32 udp_checksum_succeeded:1;
+ u32 ip_checksum_succeeded:1;
+ u32 loopback:1;
+ u32 tcp_checksum_value_invalid:1;
+ u32 ip_checksum_value_invalid:1;
+ } receive;
+ u32 value;
+ };
+};
+
+struct ndis_tcp_lso_info {
+ union {
+ struct {
+ u32 unused:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } transmit;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit;
+ struct {
+ u32 tcp_payload:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v1_transmit_complete;
+ struct {
+ u32 mss:20;
+ u32 tcp_header_offset:10;
+ u32 type:1;
+ u32 ip_version:1;
+ } lso_v2_transmit;
+ struct {
+ u32 reserved:30;
+ u32 type:1;
+ u32 reserved2:1;
+ } lso_v2_transmit_complete;
+ u32 value;
+ };
+};
+
#define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
sizeof(struct ndis_pkt_8021q_info))
+#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_ip_checksum_info))
+
+#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(struct ndis_tcp_lso_info))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
@@ -846,12 +1023,6 @@ struct rndis_message {
};
-struct rndis_filter_packet {
- void *completion_ctx;
- void (*completion)(void *context);
- struct rndis_message msg;
-};
-
/* Handy macros */
/* get the size of an RNDIS message. Pass in the message type, */
@@ -905,6 +1076,16 @@ struct rndis_filter_packet {
#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
+#define INFO_IPV4 2
+#define INFO_IPV6 4
+#define INFO_TCP 2
+#define INFO_UDP 4
+
+#define TRANSPORT_INFO_NOT_IP 0
+#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
#endif /* _HYPERV_NET_H */
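The TRANSPORT_INFO_* values added at the end of the header pack the network-layer type into the upper 16 bits and the transport protocol into the lower 16, so the netvsc transmit path below can test a single classification word with simple masks. A small sketch using only the macros defined above (helper names are illustrative):

    #include <linux/types.h>

    static inline bool example_is_ipv4(u32 info)
    {
            return info & (INFO_IPV4 << 16);        /* vs. INFO_IPV6 << 16 */
    }

    static inline bool example_is_tcp(u32 info)
    {
            return info & INFO_TCP;                 /* vs. INFO_UDP */
    }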
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e17158..daddea2654ce 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
NVSP_STAT_SUCCESS)
return -EINVAL;
- if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
/* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
+ u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+ int i, num_ver = 4; /* number of different NVSP versions */
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
- if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_2) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
- } else if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_1) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
- } else {
+ for (i = num_ver - 1; i >= 0; i--)
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ ver_list[i]) == 0) {
+ net_device->nvsp_version = ver_list[i];
+ break;
+ }
+
+ if (i < 0) {
ret = -EPROTO;
goto cleanup;
}
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050001;
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis_version = 0x00050001;
+ else
+ ndis_version = 0x0006001e;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
@@ -358,6 +365,11 @@ static int netvsc_connect_vsp(struct hv_device *device)
goto cleanup;
/* Post the big receive buffer to NetVSP */
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+ else
+ net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+
ret = netvsc_init_recv_buf(device);
cleanup:
@@ -432,17 +444,14 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+ struct hv_device *device,
struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +570,13 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
struct net_device *ndev;
- struct netvsc_device *net_device = hv_get_drvdata(device);
ndev = net_device->ndev;
@@ -653,14 +662,15 @@ static void netvsc_receive_completion(void *context)
/* Send a receive completion for the xfer page packet */
if (fsend_receive_comp)
- netvsc_send_recv_completion(device, transaction_id, status);
+ netvsc_send_recv_completion(device, net_device, transaction_id,
+ status);
}
-static void netvsc_receive(struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+ struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +683,6 @@ static void netvsc_receive(struct hv_device *device,
LIST_HEAD(listHead);
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
/*
@@ -741,7 +748,7 @@ static void netvsc_receive(struct hv_device *device,
spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
flags);
- netvsc_send_recv_completion(device,
+ netvsc_send_recv_completion(device, net_device,
vmxferpage_packet->d.trans_id,
NVSP_STAT_FAIL);
@@ -800,22 +807,16 @@ static void netvsc_channel_cb(void *context)
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
- unsigned char *packet;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
- packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
- GFP_ATOMIC);
- if (!packet)
- return;
- buffer = packet;
-
net_device = get_inbound_net_device(device);
if (!net_device)
- goto out;
+ return;
ndev = net_device->ndev;
+ buffer = net_device->cb_buffer;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +826,13 @@ static void netvsc_channel_cb(void *context)
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(device, desc);
+ netvsc_send_completion(net_device,
+ device, desc);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(device, desc);
+ netvsc_receive(net_device,
+ device, desc);
break;
default:
@@ -841,23 +844,16 @@ static void netvsc_channel_cb(void *context)
break;
}
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
} else {
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
-
+ /*
+ * We are done for this pass.
+ */
break;
}
+
} else if (ret == -ENOBUFS) {
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
@@ -872,8 +868,8 @@ static void netvsc_channel_cb(void *context)
}
} while (1);
-out:
- kfree(buffer);
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
return;
}
@@ -907,7 +903,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
ndev = net_device->ndev;
/* Initialize the NetVSC channel extension */
- net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
spin_lock_init(&net_device->recv_pkt_list_lock);
INIT_LIST_HEAD(&net_device->recv_pkt_list);
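The negotiation rework above walks a table of NVSP versions from newest to oldest and settles on the first one the host accepts, instead of hard-coding two nested attempts. A hedged sketch of that pattern, with try_version() standing in for negotiate_nvsp_ver():

    #include <linux/errno.h>
    #include <linux/types.h>

    static int example_negotiate(const u32 *versions, int count,
                                 int (*try_version)(u32 ver), u32 *chosen)
    {
            int i;

            /* Prefer the newest protocol the other side will accept. */
            for (i = count - 1; i >= 0; i--) {
                    if (try_version(versions[i]) == 0) {
                            *chosen = versions[i];
                            return 0;
                    }
            }
            return -EPROTO;         /* no common version */
    }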
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6fce9750b95..4e4cf9e0c8d7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -128,6 +128,27 @@ static int netvsc_close(struct net_device *net)
return ret;
}
+static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
+ int pkt_type)
+{
+ struct rndis_packet *rndis_pkt;
+ struct rndis_per_packet_info *ppi;
+
+ rndis_pkt = &msg->msg.pkt;
+ rndis_pkt->data_offset += ppi_size;
+
+ ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
+ rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+
+ ppi->size = ppi_size;
+ ppi->type = pkt_type;
+ ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+ rndis_pkt->per_pkt_info_len += ppi_size;
+
+ return ppi;
+}
+
static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
@@ -140,22 +161,164 @@ static void netvsc_xmit_completion(void *context)
dev_kfree_skb_any(skb);
}
+static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+ struct hv_page_buffer *pb)
+{
+ int j = 0;
+
+	/* Deal with compound pages by ignoring unused part
+ * of the page.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
+
+ while (len > 0) {
+ unsigned long bytes;
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+ pb[j].pfn = page_to_pfn(page);
+ pb[j].offset = offset;
+ pb[j].len = bytes;
+
+ offset += bytes;
+ len -= bytes;
+
+ if (offset == PAGE_SIZE && len) {
+ page++;
+ offset = 0;
+ j++;
+ }
+ }
+
+ return j + 1;
+}
+
+static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+ struct hv_page_buffer *pb)
+{
+ u32 slots_used = 0;
+ char *data = skb->data;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+
+ /* The packet is laid out thus:
+ * 1. hdr
+ * 2. skb linear data
+ * 3. skb fragment data
+ */
+ if (hdr != NULL)
+ slots_used += fill_pg_buf(virt_to_page(hdr),
+ offset_in_page(hdr),
+ len, &pb[slots_used]);
+
+ slots_used += fill_pg_buf(virt_to_page(data),
+ offset_in_page(data),
+ skb_headlen(skb), &pb[slots_used]);
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ slots_used += fill_pg_buf(skb_frag_page(frag),
+ frag->page_offset,
+ skb_frag_size(frag), &pb[slots_used]);
+ }
+ return slots_used;
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+ int i, frags = skb_shinfo(skb)->nr_frags;
+ int pages = 0;
+
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ unsigned long size = skb_frag_size(frag);
+ unsigned long offset = frag->page_offset;
+
+ /* Skip unused frames from start of page */
+ offset &= ~PAGE_MASK;
+ pages += PFN_UP(offset + size);
+ }
+ return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+ char *data = skb->data;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ int slots;
+ int frag_slots;
+
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ frag_slots = count_skb_frag_slots(skb);
+ return slots + frag_slots;
+}
+
+static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+{
+ u32 ret_val = TRANSPORT_INFO_NOT_IP;
+
+ if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
+ (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
+ goto not_ip;
+ }
+
+ *trans_off = skb_transport_offset(skb);
+
+ if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ if (iphdr->protocol == IPPROTO_TCP)
+ ret_val = TRANSPORT_INFO_IPV4_TCP;
+ else if (iphdr->protocol == IPPROTO_UDP)
+ ret_val = TRANSPORT_INFO_IPV4_UDP;
+ } else {
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ ret_val = TRANSPORT_INFO_IPV6_TCP;
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ ret_val = TRANSPORT_INFO_IPV6_UDP;
+ }
+
+not_ip:
+ return ret_val;
+}
+
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages, npg_data;
-
- /* Add multipages for skb->data and additional 2 for RNDIS */
- npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
- >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
- num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
+ unsigned int num_data_pgs;
+ struct rndis_message *rndis_msg;
+ struct rndis_packet *rndis_pkt;
+ u32 rndis_msg_size;
+ bool isvlan;
+ struct rndis_per_packet_info *ppi;
+ struct ndis_tcp_ip_checksum_info *csum_info;
+ struct ndis_tcp_lso_info *lso_info;
+ int hdr_offset;
+ u32 net_trans_info;
+
+
+	/* We will need at most two pages to describe the rndis
+ * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+ * of pages in a single packet.
+ */
+ num_data_pgs = netvsc_get_slots(skb) + 2;
+ if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+ netdev_err(net, "Packet too big: %u\n", skb->len);
+ dev_kfree_skb(skb);
+ net->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
- (num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet) +
+ (num_data_pgs * sizeof(struct hv_page_buffer)) +
+ sizeof(struct rndis_message) +
NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
@@ -168,53 +331,111 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->vlan_tci = skb->vlan_tci;
- packet->extension = (void *)(unsigned long)packet +
+ packet->is_data_pkt = true;
+ packet->total_data_buflen = skb->len;
+
+ packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
- (num_pages * sizeof(struct hv_page_buffer));
+ (num_data_pgs * sizeof(struct hv_page_buffer)));
+
+ /* Set the completion routine */
+ packet->completion.send.send_completion = netvsc_xmit_completion;
+ packet->completion.send.send_completion_ctx = packet;
+ packet->completion.send.send_completion_tid = (unsigned long)skb;
- /* If the rndis msg goes beyond 1 page, we will add 1 later */
- packet->page_buf_cnt = num_pages - 1;
+ isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
+
+ /* Add the rndis header */
+ rndis_msg = packet->rndis_msg;
+ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
+ rndis_msg->msg_len = packet->total_data_buflen;
+ rndis_pkt = &rndis_msg->msg.pkt;
+ rndis_pkt->data_offset = sizeof(struct rndis_packet);
+ rndis_pkt->data_len = packet->total_data_buflen;
+ rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+ rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+
+ if (isvlan) {
+ struct ndis_pkt_8021q_info *vlan;
+
+ rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+ IEEE_8021Q_INFO);
+ vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
+ ppi->ppi_offset);
+ vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
+ vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ }
- /* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ net_trans_info = get_net_transport_info(skb, &hdr_offset);
+ if (net_trans_info == TRANSPORT_INFO_NOT_IP)
+ goto do_send;
+
+ /*
+ * Setup the sendside checksum offload only if this is not a
+ * GSO packet.
+ */
+ if (skb_is_gso(skb))
+ goto do_lso;
+
+ rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+
+ csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+ ppi->ppi_offset);
- /* Start filling in the page buffers starting after RNDIS buffer. */
- packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
- packet->page_buf[1].offset
- = (unsigned long)skb->data & (PAGE_SIZE - 1);
- if (npg_data == 1)
- packet->page_buf[1].len = skb_headlen(skb);
+ if (net_trans_info & (INFO_IPV4 << 16))
+ csum_info->transmit.is_ipv4 = 1;
else
- packet->page_buf[1].len = PAGE_SIZE
- - packet->page_buf[1].offset;
-
- for (i = 2; i <= npg_data; i++) {
- packet->page_buf[i].pfn = virt_to_phys(skb->data
- + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
- packet->page_buf[i].offset = 0;
- packet->page_buf[i].len = PAGE_SIZE;
+ csum_info->transmit.is_ipv6 = 1;
+
+ if (net_trans_info & INFO_TCP) {
+ csum_info->transmit.tcp_checksum = 1;
+ csum_info->transmit.tcp_header_offset = hdr_offset;
+ } else if (net_trans_info & INFO_UDP) {
+ csum_info->transmit.udp_checksum = 1;
}
- if (npg_data > 1)
- packet->page_buf[npg_data].len = (((unsigned long)skb->data
- + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
-
- /* Additional fragments are after SKB data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
- packet->page_buf[i+npg_data+1].pfn =
- page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+npg_data+1].offset = f->page_offset;
- packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
+ goto do_send;
+
+do_lso:
+ rndis_msg_size += NDIS_LSO_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+ TCP_LARGESEND_PKTINFO);
+
+ lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
+ ppi->ppi_offset);
+
+ lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+ if (net_trans_info & (INFO_IPV4 << 16)) {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+ ip_hdr(skb)->tot_len = 0;
+ ip_hdr(skb)->check = 0;
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ } else {
+ lso_info->lso_v2_transmit.ip_version =
+ NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
+ lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+ lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
- /* Set the completion routine */
- packet->completion.send.send_completion = netvsc_xmit_completion;
- packet->completion.send.send_completion_ctx = packet;
- packet->completion.send.send_completion_tid = (unsigned long)skb;
+do_send:
+ /* Start filling in the page buffers with the rndis hdr */
+ rndis_msg->msg_len += rndis_msg_size;
+ packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+ skb, &packet->page_buf[0]);
+
+ ret = netvsc_send(net_device_ctx->device_ctx, packet);
- ret = rndis_filter_send(net_device_ctx->device_ctx,
- packet);
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
@@ -264,7 +485,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet)
+ struct hv_netvsc_packet *packet,
+ struct ndis_tcp_ip_checksum_info *csum_info)
{
struct net_device *net;
struct sk_buff *skb;
@@ -291,7 +513,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- skb->ip_summed = CHECKSUM_NONE;
+ if (csum_info) {
+ /* We only look at the IP checksum here.
+ * Should we be dropping the packet if checksum
+ * failed? How do we deal with other checksums - TCP/UDP?
+ */
+ if (csum_info->receive.ip_checksum_succeeded)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
if (packet->vlan_tci & VLAN_TAG_PRESENT)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_tci);
@@ -327,7 +559,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
- if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU;
if (mtu < 68 || mtu > limit)
@@ -452,9 +684,10 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
- /* TODO: Add GSO and Checksum offload */
- net->hw_features = 0;
- net->features = NETIF_F_HW_VLAN_CTAG_TX;
+ net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO;
+ net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_TSO;
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
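netvsc_get_slots() above sizes the page-buffer array from the skb layout: a buffer that starts offset bytes into a page and runs for len bytes touches DIV_ROUND_UP(offset + len, PAGE_SIZE) pages; for instance, 100 bytes starting at offset 4000 of a 4 KiB page straddles two pages. A minimal sketch of that arithmetic (function name is illustrative):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static unsigned int example_pages_spanned(unsigned long offset,
                                              unsigned long len)
    {
            offset &= ~PAGE_MASK;           /* offset within its page */
            return DIV_ROUND_UP(offset + len, PAGE_SIZE);
    }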
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b54fd257652b..4a37e3db9e32 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
-static void rndis_filter_send_completion(void *ctx);
-
-
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -297,7 +294,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
- sizeof(struct rndis_filter_packet));
+ sizeof(struct rndis_message));
if (resp->ndis_msg_type ==
RNDIS_MSG_RESET_C) {
@@ -373,6 +370,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
struct rndis_packet *rndis_pkt;
u32 data_offset;
struct ndis_pkt_8021q_info *vlan;
+ struct ndis_tcp_ip_checksum_info *csum_info;
rndis_pkt = &msg->msg.pkt;
@@ -411,7 +409,8 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->vlan_tci = 0;
}
- netvsc_recv_callback(dev->net_dev->dev, pkt);
+ csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
+ netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info);
}
int rndis_filter_receive(struct hv_device *dev,
@@ -630,6 +629,61 @@ cleanup:
return ret;
}
+int rndis_filter_set_offload_params(struct hv_device *hdev,
+ struct ndis_offload_params *req_offloads)
+{
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct rndis_device *rdev = nvdev->extension;
+ struct net_device *ndev = nvdev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct ndis_offload_params *offload_params;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_offload_params);
+ int ret, t;
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_TCP_OFFLOAD_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ offload_params = (struct ndis_offload_params *)((ulong)set +
+ set->info_buf_offset);
+ *offload_params = *req_offloads;
+ offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
+ offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ offload_params->header.size = extlen;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+		netdev_err(ndev, "timeout before we got an OFFLOAD set response...\n");
+ /* can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -EBUSY;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
@@ -829,6 +883,7 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
+ struct ndis_offload_params offloads;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -868,6 +923,26 @@ int rndis_filter_device_add(struct hv_device *dev,
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+ /* Turn on the offloads; the host supports all of the relevant
+ * offloads.
+ */
+ memset(&offloads, 0, sizeof(struct ndis_offload_params));
+ /* A value of zero means "no change"; now turn on what we
+ * want.
+ */
+ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+
+ ret = rndis_filter_set_offload_params(dev, &offloads);
+ if (ret)
+ goto err_dev_remv;
+
+
rndis_filter_query_device_link_status(rndis_device);
device_info->link_state = rndis_device->link_state;
@@ -877,6 +952,10 @@ int rndis_filter_device_add(struct hv_device *dev,
device_info->link_state ? "down" : "up");
return ret;
+
+err_dev_remv:
+ rndis_filter_device_remove(dev);
+ return ret;
}
void rndis_filter_device_remove(struct hv_device *dev)
@@ -913,101 +992,3 @@ int rndis_filter_close(struct hv_device *dev)
return rndis_filter_close_device(nvdev->extension);
}
-
-int rndis_filter_send(struct hv_device *dev,
- struct hv_netvsc_packet *pkt)
-{
- int ret;
- struct rndis_filter_packet *filter_pkt;
- struct rndis_message *rndis_msg;
- struct rndis_packet *rndis_pkt;
- u32 rndis_msg_size;
- bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
-
- /* Add the rndis header */
- filter_pkt = (struct rndis_filter_packet *)pkt->extension;
-
- rndis_msg = &filter_pkt->msg;
- rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
- if (isvlan)
- rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-
- rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
- rndis_msg->msg_len = pkt->total_data_buflen +
- rndis_msg_size;
-
- rndis_pkt = &rndis_msg->msg.pkt;
- rndis_pkt->data_offset = sizeof(struct rndis_packet);
- if (isvlan)
- rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
- rndis_pkt->data_len = pkt->total_data_buflen;
-
- if (isvlan) {
- struct rndis_per_packet_info *ppi;
- struct ndis_pkt_8021q_info *vlan;
-
- rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
- rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
-
- ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
- rndis_pkt->per_pkt_info_offset);
- ppi->size = NDIS_VLAN_PPI_SIZE;
- ppi->type = IEEE_8021Q_INFO;
- ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
-
- vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
- ppi->ppi_offset);
- vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
- vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- }
-
- pkt->is_data_pkt = true;
- pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
- pkt->page_buf[0].offset =
- (unsigned long)rndis_msg & (PAGE_SIZE-1);
- pkt->page_buf[0].len = rndis_msg_size;
-
- /* Add one page_buf if the rndis msg goes beyond page boundary */
- if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
- int i;
- for (i = pkt->page_buf_cnt; i > 1; i--)
- pkt->page_buf[i] = pkt->page_buf[i-1];
- pkt->page_buf_cnt++;
- pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
- pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
- rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
- pkt->page_buf[1].offset = 0;
- pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
- }
-
- /* Save the packet send completion and context */
- filter_pkt->completion = pkt->completion.send.send_completion;
- filter_pkt->completion_ctx =
- pkt->completion.send.send_completion_ctx;
-
- /* Use ours */
- pkt->completion.send.send_completion = rndis_filter_send_completion;
- pkt->completion.send.send_completion_ctx = filter_pkt;
-
- ret = netvsc_send(dev, pkt);
- if (ret != 0) {
- /*
- * Reset the completion to originals to allow retries from
- * above
- */
- pkt->completion.send.send_completion =
- filter_pkt->completion;
- pkt->completion.send.send_completion_ctx =
- filter_pkt->completion_ctx;
- }
-
- return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
- struct rndis_filter_packet *filter_pkt = ctx;
-
- /* Pass it back to the original handler */
- filter_pkt->completion(filter_pkt->completion_ctx);
-}
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 08ae4655423a..3e89beab64fd 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -15,9 +15,9 @@ config IEEE802154_FAKEHARD
depends on IEEE802154_DRIVERS
---help---
Say Y here to enable the fake driver that serves as an example
- of HardMAC device driver.
+ of HardMAC device driver.
- This driver can also be built as a module. To do so say M here.
+ This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
config IEEE802154_FAKELB
@@ -31,17 +31,23 @@ config IEEE802154_FAKELB
The module will be called 'fakelb'.
config IEEE802154_AT86RF230
- depends on IEEE802154_DRIVERS && MAC802154
- tristate "AT86RF230/231 transceiver driver"
- depends on SPI
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231/233/212 transceiver driver"
+ depends on SPI
+ ---help---
+ Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+	  The module will be called 'at86rf230'.
config IEEE802154_MRF24J40
- tristate "Microchip MRF24J40 transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
- ---help---
- Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
- controller.
-
- This driver can also be built as a module. To do so, say M here.
- the module will be called 'mrf24j40'.
+ tristate "Microchip MRF24J40 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ ---help---
+	  Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
+	  controller.
+
+	  This driver can also be built as a module. To do so, say M here.
+	  The module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a30258aad139..89417ac41083 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -31,13 +31,13 @@
#include <linux/spi/spi.h>
#include <linux/spi/at86rf230.h>
#include <linux/skbuff.h>
+#include <linux/of_gpio.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>
struct at86rf230_local {
struct spi_device *spi;
- int rstn, slp_tr, dig2;
u8 part;
u8 vers;
@@ -53,8 +53,16 @@ struct at86rf230_local {
spinlock_t lock;
bool irq_busy;
bool is_tx;
+ bool tx_aret;
+
+ int rssi_base_val;
};
+static bool is_rf212(struct at86rf230_local *local)
+{
+ return local->part == 7;
+}
+
#define RG_TRX_STATUS (0x01)
#define SR_TRX_STATUS 0x01, 0x1f, 0
#define SR_RESERVED_01_3 0x01, 0x20, 5
@@ -100,7 +108,10 @@ struct at86rf230_local {
#define SR_SFD_VALUE 0x0b, 0xff, 0
#define RG_TRX_CTRL_2 (0x0c)
#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
#define RG_ANT_DIV (0x0d)
#define SR_ANT_CTRL 0x0d, 0x03, 0
@@ -145,7 +156,7 @@ struct at86rf230_local {
#define SR_RESERVED_17_5 0x17, 0x08, 3
#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
#define SR_RESERVED_17_1 0x17, 0x80, 7
#define RG_FTN_CTRL (0x18)
#define SR_RESERVED_18_2 0x18, 0x7f, 0
@@ -234,6 +245,7 @@ struct at86rf230_local {
#define STATE_TX_ON 0x09
/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
#define STATE_SLEEP 0x0F
+#define STATE_PREP_DEEP_SLEEP 0x10
#define STATE_BUSY_RX_AACK 0x11
#define STATE_BUSY_TX_ARET 0x12
#define STATE_RX_AACK_ON 0x16
@@ -244,6 +256,57 @@ struct at86rf230_local {
#define STATE_TRANSITION_IN_PROGRESS 0x1F
static int
+__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
+ u8 *version)
+{
+ u8 data[4];
+ u8 *buf = kmalloc(2, GFP_KERNEL);
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ u8 reg;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
+ buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(spi, &msg);
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ data[reg - RG_PART_NUM] = buf[1];
+ else
+ break;
+ }
+
+ if (status == 0) {
+ *part = data[0];
+ *version = data[1];
+ *man_id = (data[3] << 8) | data[2];
+ }
+
+ kfree(buf);
+
+ return status;
+}
+
+static int
__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
{
u8 *buf = lp->buf;
@@ -489,7 +552,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
} while (val == STATE_TRANSITION_IN_PROGRESS);
- if (val == desired_status)
+ if (val == desired_status ||
+ (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
+ (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
return 0;
pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +575,11 @@ at86rf230_start(struct ieee802154_dev *dev)
if (rc)
return rc;
- return at86rf230_state(dev, STATE_RX_ON);
+ rc = at86rf230_state(dev, STATE_TX_ON);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_AACK_ON);
}
static void
@@ -520,6 +589,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
}
static int
+at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ lp->rssi_base_val = -91;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
+at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ int rc;
+
+ if (channel == 0)
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
+ else
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
+ if (rc < 0)
+ return rc;
+
+ if (page == 0) {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
+ lp->rssi_base_val = -100;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
+ lp->rssi_base_val = -98;
+ }
+ if (rc < 0)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
{
struct at86rf230_local *lp = dev->priv;
@@ -527,14 +629,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
might_sleep();
- if (page != 0 || channel < 11 || channel > 26) {
+ if (page < 0 || page > 31 ||
+ !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
WARN_ON(1);
return -EINVAL;
}
- rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ if (is_rf212(lp))
+ rc = at86rf212_set_channel(lp, page, channel);
+ else
+ rc = at86rf230_set_channel(lp, page, channel);
+ if (rc < 0)
+ return rc;
+
msleep(1); /* Wait for PLL */
dev->phy->current_channel = channel;
+ dev->phy->current_page = page;
return 0;
}
@@ -568,6 +678,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc)
goto err_rx;
+ if (lp->tx_aret) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
+ if (rc)
+ goto err_rx;
+ }
+
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
if (rc)
goto err_rx;
@@ -630,30 +746,31 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
struct at86rf230_local *lp = dev->priv;
if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for saddr\n");
- __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
- __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
+ __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
+ __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for pan id\n");
- __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
- __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
+ __at86rf230_write(lp, RG_PAN_ID_0, pan);
+ __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ u8 i, addr[8];
+
+ memcpy(addr, &filt->ieee_addr, 8);
dev_vdbg(&lp->spi->dev,
"at86rf230_set_hw_addr_filt called for IEEE addr\n");
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
- at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
+ for (i = 0; i < 8; i++)
+ __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802515_AFILT_PANC_CHANGED) {
@@ -668,6 +785,93 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
return 0;
}
+static int
+at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+	/* Typical maximum output is 5 dBm with RG_PHY_TX_PWR 0x60; the lower
+	 * five bits decrease the power in 1 dB steps, and 0x60 represents an
+	 * extra PA gain of 0 dB.
+	 * Thus the supported values for db range from -26 to 5, i.e. from
+	 * 31 dB of reduction down to 0 dB of reduction.
+	 */
+ if (db > 5 || db < -26)
+ return -EINVAL;
+
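+	/* e.g. db = 5 maps to 0x60 (no attenuation), db = -26 to 0x7f (31 dB) */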
+ db = -(db - 5);
+
+ return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+}
+
+static int
+at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
+}
+
+static int
+at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+}
+
+static int
+at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int desens_steps;
+
+ if (level < lp->rssi_base_val || level > 30)
+ return -EINVAL;
+
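+	/* one register step corresponds to roughly 2.07 dB above rssi_base_val */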
+ desens_steps = (level - lp->rssi_base_val) * 100 / 207;
+
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+}
+
+static int
+at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+ u8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ if (min_be > max_be || max_be > 8 || retries > 5)
+ return -EINVAL;
+
+ rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
+ if (rc)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
+ if (rc)
+ return rc;
+
+	return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries);
+}
+
+static int
+at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc = 0;
+
+ if (retries < -1 || retries > 15)
+ return -EINVAL;
+
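+	/* a non-negative retry count enables the TX_ARET path in xmit */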
+ lp->tx_aret = retries >= 0;
+
+ if (retries >= 0)
+ rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
+
+ return rc;
+}
+
static struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
.xmit = at86rf230_xmit,
@@ -678,6 +882,22 @@ static struct ieee802154_ops at86rf230_ops = {
.set_hw_addr_filt = at86rf230_set_hw_addr_filt,
};
+static struct ieee802154_ops at86rf212_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+ .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+ .set_txpower = at86rf212_set_txpower,
+ .set_lbt = at86rf212_set_lbt,
+ .set_cca_mode = at86rf212_set_cca_mode,
+ .set_cca_ed_level = at86rf212_set_cca_ed_level,
+ .set_csma_params = at86rf212_set_csma_params,
+ .set_frame_retries = at86rf212_set_frame_retries,
+};
+
static void at86rf230_irqwork(struct work_struct *work)
{
struct at86rf230_local *lp =
@@ -695,8 +915,8 @@ static void at86rf230_irqwork(struct work_struct *work)
status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
if (status & IRQ_TRX_END) {
- spin_lock_irqsave(&lp->lock, flags);
status &= ~IRQ_TRX_END;
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->is_tx) {
lp->is_tx = 0;
spin_unlock_irqrestore(&lp->lock, flags);
@@ -753,22 +973,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
int rc, irq_pol;
u8 status;
+ u8 csma_seed[2];
rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
if (rc)
return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- if (status == STATE_P_ON) {
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
- if (rc)
- return rc;
- msleep(1);
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- }
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+ if (rc)
+ return rc;
/* configure irq polarity, defaults to high active */
if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -784,6 +997,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
if (rc)
return rc;
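+	/* randomly seed the CSMA backoff generator so nodes do not share
+	 * identical backoff timing
+	 */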
+ get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+ if (rc)
+ return rc;
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+ if (rc)
+ return rc;
+
/* CLKM changes are applied immediately */
rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
if (rc)
@@ -796,16 +1017,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
- if (rc)
- return rc;
- msleep(1);
-
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
if (rc)
return rc;
@@ -825,14 +1036,38 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static void at86rf230_fill_data(struct spi_device *spi)
+static struct at86rf230_platform_data *
+at86rf230_get_pdata(struct spi_device *spi)
{
- struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+ struct at86rf230_platform_data *pdata;
+	const char *irq_type = "edge-rising"; /* default when the DT property is absent */
+
+ if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
+ return spi->dev.platform_data;
+
+ pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+ pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+
+ pdata->irq_type = IRQF_TRIGGER_RISING;
+ of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
+ if (!strcmp(irq_type, "level-high"))
+ pdata->irq_type = IRQF_TRIGGER_HIGH;
+ else if (!strcmp(irq_type, "level-low"))
+ pdata->irq_type = IRQF_TRIGGER_LOW;
+ else if (!strcmp(irq_type, "edge-rising"))
+ pdata->irq_type = IRQF_TRIGGER_RISING;
+ else if (!strcmp(irq_type, "edge-falling"))
+ pdata->irq_type = IRQF_TRIGGER_FALLING;
+ else
+		dev_warn(&spi->dev, "wrong irq-type specified, using edge-rising\n");
- lp->rstn = pdata->rstn;
- lp->slp_tr = pdata->slp_tr;
- lp->dig2 = pdata->dig2;
+ spi->dev.platform_data = pdata;
+done:
+ return pdata;
}
static int at86rf230_probe(struct spi_device *spi)
@@ -840,133 +1075,146 @@ static int at86rf230_probe(struct spi_device *spi)
struct at86rf230_platform_data *pdata;
struct ieee802154_dev *dev;
struct at86rf230_local *lp;
- u8 man_id_0, man_id_1, status;
+ u16 man_id = 0;
+ u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc, supported = 0;
+ int rc;
const char *chip;
+ struct ieee802154_ops *ops = NULL;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
return -EINVAL;
}
- pdata = spi->dev.platform_data;
+ pdata = at86rf230_get_pdata(spi);
if (!pdata) {
dev_err(&spi->dev, "no platform_data\n");
return -EINVAL;
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
- return -ENOMEM;
-
- lp = dev->priv;
- lp->dev = dev;
-
- lp->spi = spi;
-
- dev->parent = &spi->dev;
- dev->extra_tx_headroom = 0;
- /* We do support only 2.4 Ghz */
- dev->phy->channels_supported[0] = 0x7FFF800;
- dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- irq_worker = at86rf230_irqwork;
- irq_handler = at86rf230_isr;
- } else {
- irq_worker = at86rf230_irqwork_level;
- irq_handler = at86rf230_isr_level;
+ if (gpio_is_valid(pdata->rstn)) {
+ rc = gpio_request(pdata->rstn, "rstn");
+ if (rc)
+ return rc;
}
- mutex_init(&lp->bmux);
- INIT_WORK(&lp->irqwork, irq_worker);
- spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
-
- spi_set_drvdata(spi, lp);
-
- at86rf230_fill_data(spi);
-
- rc = gpio_request(lp->rstn, "rstn");
- if (rc)
- goto err_rstn;
-
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_request(pdata->slp_tr, "slp_tr");
if (rc)
goto err_slp_tr;
}
- rc = gpio_direction_output(lp->rstn, 1);
- if (rc)
- goto err_gpio_dir;
+ if (gpio_is_valid(pdata->rstn)) {
+ rc = gpio_direction_output(pdata->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+ }
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_direction_output(lp->slp_tr, 0);
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_direction_output(pdata->slp_tr, 0);
if (rc)
goto err_gpio_dir;
}
/* Reset */
- msleep(1);
- gpio_set_value(lp->rstn, 0);
- msleep(1);
- gpio_set_value(lp->rstn, 1);
- msleep(1);
+ if (gpio_is_valid(pdata->rstn)) {
+ udelay(1);
+ gpio_set_value(pdata->rstn, 0);
+ udelay(1);
+ gpio_set_value(pdata->rstn, 1);
+ usleep_range(120, 240);
+ }
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
- if (rc)
- goto err_gpio_dir;
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
- if (rc)
+ rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
+ if (rc < 0)
goto err_gpio_dir;
- if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
- man_id_1, man_id_0);
+ man_id >> 8, man_id & 0xFF);
rc = -EINVAL;
goto err_gpio_dir;
}
- rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
- if (rc)
- goto err_gpio_dir;
-
- rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
- if (rc)
- goto err_gpio_dir;
-
- switch (lp->part) {
+ switch (part) {
case 2:
chip = "at86rf230";
- /* supported = 1; FIXME: should be easy to support; */
+ /* FIXME: should be easy to support; */
break;
case 3:
chip = "at86rf231";
- supported = 1;
+ ops = &at86rf230_ops;
+ break;
+ case 7:
+ chip = "at86rf212";
+ if (version == 1)
+ ops = &at86rf212_ops;
+ break;
+ case 11:
+ chip = "at86rf233";
+ ops = &at86rf230_ops;
break;
default:
chip = "UNKNOWN";
break;
}
- dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
- if (!supported) {
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
+ if (!ops) {
rc = -ENOTSUPP;
goto err_gpio_dir;
}
+ dev = ieee802154_alloc_device(sizeof(*lp), ops);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err_gpio_dir;
+ }
+
+ lp = dev->priv;
+ lp->dev = dev;
+ lp->part = part;
+ lp->vers = version;
+
+ lp->spi = spi;
+
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+ if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_worker = at86rf230_irqwork;
+ irq_handler = at86rf230_isr;
+ } else {
+ irq_worker = at86rf230_irqwork_level;
+ irq_handler = at86rf230_isr_level;
+ }
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, irq_worker);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
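+	/* rf212 supports sub-GHz channels 0-10 on pages 0 and 2; the 2.4 GHz
+	 * parts support channels 11-26 on page 0
+	 */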
+ if (is_rf212(lp)) {
+ dev->phy->channels_supported[0] = 0x00007FF;
+ dev->phy->channels_supported[2] = 0x00007FF;
+ } else {
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ }
+
rc = at86rf230_hw_init(lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
rc = request_irq(spi->irq, irq_handler,
IRQF_SHARED | pdata->irq_type,
dev_name(&spi->dev), lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
/* Read irq status register to reset irq line */
rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -981,30 +1229,37 @@ static int at86rf230_probe(struct spi_device *spi)
err_irq:
free_irq(spi->irq, lp);
+err_hw_init:
flush_work(&lp->irqwork);
-err_gpio_dir:
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
-err_slp_tr:
- gpio_free(lp->rstn);
-err_rstn:
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
+
+err_gpio_dir:
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+err_slp_tr:
+ if (gpio_is_valid(pdata->rstn))
+ gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+ /* mask all at86rf230 irq's */
+ at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_device(lp->dev);
free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
- gpio_free(lp->rstn);
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+ if (gpio_is_valid(pdata->rstn))
+ gpio_free(pdata->rstn);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
@@ -1013,8 +1268,19 @@ static int at86rf230_remove(struct spi_device *spi)
return 0;
}
+#if IS_ENABLED(CONFIG_OF)
+static struct of_device_id at86rf230_of_match[] = {
+ { .compatible = "atmel,at86rf230", },
+ { .compatible = "atmel,at86rf231", },
+ { .compatible = "atmel,at86rf233", },
+ { .compatible = "atmel,at86rf212", },
+ { },
+};
+#endif
+
static struct spi_driver at86rf230_driver = {
.driver = {
+ .of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
.owner = THIS_MODULE,
},
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index bf0d55e2dd63..78f18be3bbf2 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -63,11 +63,11 @@ static struct wpan_phy *fake_get_phy(const struct net_device *dev)
*
* Return the ID of the PAN from the PIB.
*/
-static u16 fake_get_pan_id(const struct net_device *dev)
+static __le16 fake_get_pan_id(const struct net_device *dev)
{
BUG_ON(dev->type != ARPHRD_IEEE802154);
- return 0xeba1;
+ return cpu_to_le16(0xeba1);
}
/**
@@ -78,11 +78,11 @@ static u16 fake_get_pan_id(const struct net_device *dev)
* device. If the device has not yet had a short address assigned
* then this should return 0xFFFF to indicate a lack of association.
*/
-static u16 fake_get_short_addr(const struct net_device *dev)
+static __le16 fake_get_short_addr(const struct net_device *dev)
{
BUG_ON(dev->type != ARPHRD_IEEE802154);
- return 0x1;
+ return cpu_to_le16(0x1);
}
/**
@@ -149,7 +149,7 @@ static int fake_assoc_req(struct net_device *dev,
* 802.15.4-2006 document.
*/
static int fake_assoc_resp(struct net_device *dev,
- struct ieee802154_addr *addr, u16 short_addr, u8 status)
+ struct ieee802154_addr *addr, __le16 short_addr, u8 status)
{
return 0;
}
@@ -191,10 +191,10 @@ static int fake_disassoc_req(struct net_device *dev,
* Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
* document, with 7.3.8 describing coordinator realignment.
*/
-static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
- u8 channel, u8 page,
- u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
- u8 coord_realign)
+static int fake_start_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 channel, u8 page,
+ u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+ u8 coord_realign)
{
struct wpan_phy *phy = fake_to_phy(dev);
@@ -281,8 +281,8 @@ static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
switch (cmd) {
case SIOCGIFADDR:
/* FIXME: fixed here, get from device IRL */
- pan_id = fake_get_pan_id(dev);
- short_addr = fake_get_short_addr(dev);
+ pan_id = le16_to_cpu(fake_get_pan_id(dev));
+ short_addr = le16_to_cpu(fake_get_short_addr(dev));
if (pan_id == IEEE802154_PANID_BROADCAST ||
short_addr == IEEE802154_ADDR_BROADCAST)
return -EADDRNOTAVAIL;
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 246befa4ba05..78a6552ed707 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -465,8 +465,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
/* Short Addr */
u8 addrh, addrl;
- addrh = filt->short_addr >> 8 & 0xff;
- addrl = filt->short_addr & 0xff;
+ addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
+ addrl = le16_to_cpu(filt->short_addr) & 0xff;
write_short_reg(devrec, REG_SADRH, addrh);
write_short_reg(devrec, REG_SADRL, addrl);
@@ -476,15 +476,16 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
/* Device Address */
- int i;
+ u8 i, addr[8];
+
+ memcpy(addr, &filt->ieee_addr, 8);
for (i = 0; i < 8; i++)
- write_short_reg(devrec, REG_EADR0+i,
- filt->ieee_addr[7-i]);
+ write_short_reg(devrec, REG_EADR0 + i, addr[i]);
#ifdef DEBUG
printk(KERN_DEBUG "Set long addr to: ");
for (i = 0; i < 8; i++)
- printk("%02hhx ", filt->ieee_addr[i]);
+ printk("%02hhx ", addr[7 - i]);
printk(KERN_DEBUG "\n");
#endif
}
@@ -492,8 +493,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
if (changed & IEEE802515_AFILT_PANID_CHANGED) {
/* PAN ID */
u8 panidl, panidh;
- panidh = filt->pan_id >> 8 & 0xff;
- panidl = filt->pan_id & 0xff;
+ panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
+ panidl = le16_to_cpu(filt->pan_id) & 0xff;
write_short_reg(devrec, REG_PANIDH, panidh);
write_short_reg(devrec, REG_PANIDL, panidl);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index d7b2e947184b..46a7790be004 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&dp->rsync);
+ start = u64_stats_fetch_begin_irq(&dp->rsync);
stats->rx_packets = dp->rx_packets;
stats->rx_bytes = dp->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+ } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
do {
- start = u64_stats_fetch_begin_bh(&dp->tsync);
+ start = u64_stats_fetch_begin_irq(&dp->tsync);
stats->tx_packets = dp->tx_packets;
stats->tx_bytes = dp->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+ } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped = dev->stats.tx_dropped;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c5011e078e1b..bb96409f8c05 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -111,10 +111,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
tbytes = lb_stats->bytes;
tpackets = lb_stats->packets;
- } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
bytes += tbytes;
packets += tpackets;
}
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- int i;
- dev->lstats = alloc_percpu(struct pcpu_lstats);
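+	/* netdev_alloc_pcpu_stats() also runs u64_stats_init() on each per-CPU
+	 * syncp, so the open-coded init loop is no longer needed
+	 */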
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *lb_stats;
- lb_stats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&lb_stats->syncp);
- }
return 0;
}
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
.ndo_init = loopback_dev_init,
.ndo_start_xmit= loopback_xmit,
.ndo_get_stats64 = loopback_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
};
/*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -181,6 +176,7 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_UFO
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
+ | NETIF_F_SCTP_CSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1831fb7cd017..753a8c23d15d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -537,7 +537,6 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
- int i;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,16 +548,10 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *mvlstats;
- mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
- u64_stats_init(&mvlstats->syncp);
- }
-
return 0;
}
@@ -589,13 +582,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
for_each_possible_cpu(i) {
p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12bfabd5..34924dfadd00 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
static int nlmon_dev_init(struct net_device *dev)
{
- int i;
-
- dev->lstats = alloc_percpu(struct pcpu_lstats);
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *nlmstats;
- nlmstats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&nlmstats->syncp);
- }
-
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
return dev->lstats == NULL ? -ENOMEM : 0;
}
@@ -99,10 +90,10 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
nl_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
tbytes = nl_stats->bytes;
tpackets = nl_stats->packets;
- } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
packets += tpackets;
bytes += tbytes;
@@ -145,7 +136,8 @@ static void nlmon_setup(struct net_device *dev)
dev->ethtool_ops = &nlmon_ethtool_ops;
dev->destructor = free_netdev;
- dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
/* That's rather a softlimit here, which, of course,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9b5d46c03eed..6a17f92153b3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -71,6 +71,12 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM7XXX_PHY
+ tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+ ---help---
+	  Currently supports the BCM7366, BCM7439 and BCM7445, as well
+	  as the 40nm and 65nm generations of BCM7xxx Set Top Box SoCs.
+
config BCM87XX_PHY
tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9013dfa12aa3..07d24024863e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bc71947b1ec3..643464d5a727 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -27,6 +27,9 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_INER 0x0012
+#define AT803X_INER_INIT 0xec00
+#define AT803X_INSR 0x0013
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
@@ -191,6 +194,31 @@ static int at803x_config_init(struct phy_device *phydev)
return 0;
}
+static int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INER);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, AT803X_INER,
+ value | AT803X_INER_INIT);
+ else
+ err = phy_write(phydev, AT803X_INER, 0);
+
+ return err;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -240,6 +268,8 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .ack_interrupt = &at803x_ack_interrupt,
+ .config_intr = &at803x_config_intr,
.driver = {
.owner = THIS_MODULE,
},
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644
index 000000000000..526b94cea569
--- /dev/null
+++ b/drivers/net/phy/bcm7xxx.c
@@ -0,0 +1,359 @@
+/*
+ * Broadcom BCM7xxx internal transceivers support.
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/brcmphy.h>
+
+/* Broadcom BCM7xxx internal PHY registers */
+#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
+
+/* 40nm only register definitions */
+#define MII_BCM7XXX_100TX_AUX_CTL 0x10
+#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
+#define MII_BCM7XXX_100TX_DISC 0x14
+#define MII_BCM7XXX_AUX_MODE 0x1d
+#define MII_BCM7XX_64CLK_MDIO BIT(12)
+#define MII_BCM7XXX_CORE_BASE1E 0x1e
+#define MII_BCM7XXX_TEST 0x1f
+#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel) base, channel
+
+#define DSP_TAP10 MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+
+#define CORE_EXPB0 0xb0
+
+static int bcm7445_config_init(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7445_regs {
+ int reg;
+ u16 value;
+ } bcm7445_regs_cfg[] = {
+ /* increases ADC latency by 24ns */
+ { MII_BCM54XX_EXP_SEL, 0x0038 },
+ { MII_BCM54XX_EXP_DATA, 0xAB95 },
+ /* increases internal 1V LDO voltage by 5% */
+ { MII_BCM54XX_EXP_SEL, 0x2038 },
+ { MII_BCM54XX_EXP_DATA, 0xBB22 },
+ /* reduce RX low pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x6038 },
+ { MII_BCM54XX_EXP_DATA, 0xFFC5 },
+ /* reduce RX high pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x003a },
+ { MII_BCM54XX_EXP_DATA, 0x2002 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7445_regs_cfg[i].reg,
+ bcm7445_regs_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void phy_write_exp(struct phy_device *phydev,
+ u16 reg, u16 value)
+{
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static void phy_write_misc(struct phy_device *phydev,
+ u16 reg, u16 chl, u16 value)
+{
+ int tmp;
+
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+
+ tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+
+ tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
+
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+{
+	/* Increase the VCO range to prevent the PLL from unlocking at low
+	 * temperature
+	 */
+ phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+ /* Change Ki to 011 */
+ phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, CORE_EXPB0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+
+ /* write AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+	/* write AFE_TX_CONFIG */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm7445_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_afe_config_init(phydev);
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int v, ret;
+
+ v = phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+ ret = phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+}
+
+static int bcm7xxx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable 64 clock MDIO */
+ phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+ phy_read(phydev, MII_BCM7XXX_AUX_MODE);
+
+ /* Workaround only required for 100Mbits/sec */
+ if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
+ return 0;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* set iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
+ udelay(10);
+
+ /* reset iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
+
+ phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
+
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Workaround for putting the PHY in IDDQ mode, required
+ * for all BCM7XXX PHYs
+ */
+static int bcm7xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7xxx_regs {
+ int reg;
+ u16 value;
+ } bcm7xxx_suspend_cfg[] = {
+ { MII_BCM7XXX_TEST, 0x008b },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
+ { MII_BCM7XXX_100TX_DISC, 0x7000 },
+ { MII_BCM7XXX_TEST, 0x000f },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
+ { MII_BCM7XXX_TEST, 0x000b },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7xxx_suspend_cfg[i].reg,
+ bcm7xxx_suspend_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static struct phy_driver bcm7xxx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM7366,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7366",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7439,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7439",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7445,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7445",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .name = "Broadcom BCM7XXX 28nm",
+ .phy_id = PHY_ID_BCM7XXX_28,
+ .phy_id_mask = PHY_BCM_OUI_MASK,
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_4,
+ .phy_id_mask = 0xffff0000,
+ .name = "Broadcom BCM7XXX 40nm",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_5,
+ .phy_id_mask = 0xffffff00,
+ .name = "Broadcom BCM7XXX 65nm",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_dummy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7439, 0xfffffff0, },
+ { PHY_ID_BCM7445, 0xfffffff0, },
+ { PHY_ID_BCM7XXX_28, 0xfffffc00 },
+ { PHY_BCM_OUI_4, 0xffff0000 },
+ { PHY_BCM_OUI_5, 0xffffff00 },
+ { }
+};
+
+static int __init bcm7xxx_phy_init(void)
+{
+ return phy_drivers_register(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+static void __exit bcm7xxx_phy_exit(void)
+{
+ phy_drivers_unregister(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+module_init(bcm7xxx_phy_init);
+module_exit(bcm7xxx_phy_exit);
+
+MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
+
+MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f8c90ea75108..34088d60da74 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,58 +25,6 @@
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
-
-#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
-#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
-#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
-
-#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
-#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
-
-#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
-#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
-#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
-#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
-
-#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
-#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
-#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
-#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
-#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
-#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
-#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
-#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
-#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
-#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
-#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
-#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
-#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
-#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
-#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
-#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
-#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
-#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
-
-#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
-#define MII_BCM54XX_SHD_WRITE 0x8000
-#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
-#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
-
-/*
- * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
- */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-
-
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
* BCM5482, and possibly some others.
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 98e7cbf720a5..6a999e6814a0 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
@@ -47,6 +48,7 @@
#define CAL_EVENT 7
#define CAL_TRIGGER 7
#define PER_TRIGGER 6
+#define DP83640_N_PINS 12
#define MII_DP83640_MICR 0x11
#define MII_DP83640_MISR 0x12
@@ -173,6 +175,37 @@ MODULE_PARM_DESC(chosen_phy, \
MODULE_PARM_DESC(gpio_tab, \
"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
+static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
+{
+ int i, index;
+
+ for (i = 0; i < DP83640_N_PINS; i++) {
+ snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
+ pd[i].index = i;
+ }
+
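+	/* gpio_tab module parameters are 1-based GPIO numbers, pin_config
+	 * indices are 0-based
+	 */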
+ for (i = 0; i < GPIO_TABLE_SIZE; i++) {
+ if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
+			pr_err("gpio_tab[%d]=%hu out of range\n", i, gpio_tab[i]);
+ return;
+ }
+ }
+
+ index = gpio_tab[CALIBRATE_GPIO] - 1;
+ pd[index].func = PTP_PF_PHYSYNC;
+ pd[index].chan = 0;
+
+ index = gpio_tab[PEROUT_GPIO] - 1;
+ pd[index].func = PTP_PF_PEROUT;
+ pd[index].chan = 0;
+
+ for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
+ index = gpio_tab[i] - 1;
+ pd[index].func = PTP_PF_EXTTS;
+ pd[index].chan = i - EXTTS0_GPIO;
+ }
+}
+
/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);
@@ -266,15 +299,22 @@ static u64 phy2txts(struct phy_txts *p)
return ns;
}
-static void periodic_output(struct dp83640_clock *clock,
- struct ptp_clock_request *clkreq, bool on)
+static int periodic_output(struct dp83640_clock *clock,
+ struct ptp_clock_request *clkreq, bool on)
{
struct dp83640_private *dp83640 = clock->chosen;
struct phy_device *phydev = dp83640->phydev;
- u32 sec, nsec, period;
+ u32 sec, nsec, pwidth;
u16 gpio, ptp_trig, trigger, val;
- gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
+ if (on) {
+ gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
+ if (gpio < 1)
+ return -EINVAL;
+ } else {
+ gpio = 0;
+ }
+
trigger = PER_TRIGGER;
ptp_trig = TRIG_WR |
@@ -291,13 +331,14 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
- return;
+ return 0;
}
sec = clkreq->perout.start.sec;
nsec = clkreq->perout.start.nsec;
- period = clkreq->perout.period.sec * 1000000000UL;
- period += clkreq->perout.period.nsec;
+ pwidth = clkreq->perout.period.sec * 1000000000UL;
+ pwidth += clkreq->perout.period.nsec;
+ pwidth /= 2;
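+	/* the trigger is programmed with a pulse width, so use half the
+	 * period for a 50% duty cycle output
+	 */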
mutex_lock(&clock->extreg_lock);
@@ -310,8 +351,8 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16); /* ns[31:16] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff); /* sec[15:0] */
ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16); /* sec[31:16] */
- ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
- ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16); /* ns[31:16] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
+ ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16); /* ns[31:16] */
/*enable trigger*/
val &= ~TRIG_LOAD;
@@ -319,6 +360,7 @@ static void periodic_output(struct dp83640_clock *clock,
ext_write(0, phydev, PAGE4, PTP_CTL, val);
mutex_unlock(&clock->extreg_lock);
+ return 0;
}
/* ptp clock methods */
@@ -424,18 +466,21 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
struct dp83640_clock *clock =
container_of(ptp, struct dp83640_clock, caps);
struct phy_device *phydev = clock->chosen->phydev;
- int index;
+ unsigned int index;
u16 evnt, event_num, gpio_num;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
index = rq->extts.index;
- if (index < 0 || index >= N_EXT_TS)
+ if (index >= N_EXT_TS)
return -EINVAL;
event_num = EXT_EVENT + index;
evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
if (on) {
- gpio_num = gpio_tab[EXTTS0_GPIO + index];
+ gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
+ PTP_PF_EXTTS, index);
+ if (gpio_num < 1)
+ return -EINVAL;
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
if (rq->extts.flags & PTP_FALLING_EDGE)
evnt |= EVNT_FALL;
@@ -448,8 +493,7 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
case PTP_CLK_REQ_PEROUT:
if (rq->perout.index != 0)
return -EINVAL;
- periodic_output(clock, rq, on);
- return 0;
+ return periodic_output(clock, rq, on);
default:
break;
@@ -458,6 +502,12 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
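+	/* any pin may be assigned to any function, so nothing to reject here */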
+ return 0;
+}
+
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
@@ -875,6 +925,7 @@ static void dp83640_free_clocks(void)
mutex_destroy(&clock->extreg_lock);
mutex_destroy(&clock->clock_lock);
put_device(&clock->bus->dev);
+ kfree(clock->caps.pin_config);
kfree(clock);
}
@@ -894,12 +945,18 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_alarm = 0;
clock->caps.n_ext_ts = N_EXT_TS;
clock->caps.n_per_out = 1;
+ clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
clock->caps.adjfreq = ptp_dp83640_adjfreq;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime = ptp_dp83640_gettime;
clock->caps.settime = ptp_dp83640_settime;
clock->caps.enable = ptp_dp83640_enable;
+ clock->caps.verify = ptp_dp83640_verify;
+ /*
+ * Convert the module param defaults into a dynamic pin configuration.
+ */
+ dp83640_gpio_defaults(clock->caps.pin_config);
/*
* Get a reference to this bus instance.
*/
@@ -950,6 +1007,13 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
if (!clock)
goto out;
+ clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
+ DP83640_N_PINS, GFP_KERNEL);
+ if (!clock->caps.pin_config) {
+ kfree(clock);
+ clock = NULL;
+ goto out;
+ }
dp83640_clock_init(clock, bus);
list_add_tail(&phyter_clocks, &clock->list);
out:
@@ -1363,7 +1427,7 @@ static void __exit dp83640_exit(void)
}
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
-MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
module_init(dp83640_init);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 9367acc84fbb..15bc7f9ea224 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -90,11 +90,6 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
-static int sun4i_mdio_reset(struct mii_bus *bus)
-{
- return 0;
-}
-
static int sun4i_mdio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -110,7 +105,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
bus->name = "sun4i_mii_bus";
bus->read = &sun4i_mdio_read;
bus->write = &sun4i_mdio_write;
- bus->reset = &sun4i_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
bus->parent = &pdev->dev;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 71e49000fbf3..76f54b32a120 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(phy_id);
+static ssize_t
+phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%s\n", phy_modes(phydev->interface));
+}
+static DEVICE_ATTR_RO(phy_interface);
+
+static ssize_t
+phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", phydev->has_fixups);
+}
+static DEVICE_ATTR_RO(phy_has_fixups);
+
static struct attribute *mdio_dev_attrs[] = {
&dev_attr_phy_id.attr,
+ &dev_attr_phy_interface.attr,
+ &dev_attr_phy_has_fixups.attr,
NULL,
};
ATTRIBUTE_GROUPS(mdio_dev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5a8993b0cafc..5ad971a55c5d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -148,15 +148,52 @@ static int ks8737_config_intr(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int kszphy_setup_led(struct phy_device *phydev,
+ unsigned int reg, unsigned int shift)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int rc, temp;
+ u32 val;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_property_read_u32(of_node, "micrel,led-mode", &val))
+ return 0;
+
+ temp = phy_read(phydev, reg);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(3 << shift);
+ temp |= val << shift;
+ rc = phy_write(phydev, reg, temp);
+
+ return rc < 0 ? rc : 0;
+}
+
static int kszphy_config_init(struct phy_device *phydev)
{
return 0;
}
+static int kszphy_config_init_led8041(struct phy_device *phydev)
+{
+ /* single led control, register 0x1e bits 15..14 */
+ return kszphy_setup_led(phydev, 0x1e, 14);
+}
+
static int ksz8021_config_init(struct phy_device *phydev)
{
- int rc;
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+ int rc;
+
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
phy_write(phydev, MII_KSZPHY_OMSO, val);
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
@@ -166,6 +203,10 @@ static int ks8051_config_init(struct phy_device *phydev)
{
int rc;
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
}
@@ -327,7 +368,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -342,7 +383,7 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -371,7 +412,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 76d96b9ebcdb..1d788f19135b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -38,6 +38,26 @@
#include <asm/irq.h>
+static const char *phy_speed_to_str(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported (update phy.c)";
+ }
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -45,12 +65,13 @@
void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
- pr_info("%s - Link is Up - %d/%s\n",
- dev_name(&phydev->dev),
- phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ netdev_info(phydev->attached_dev,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(phydev->speed),
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
+ phydev->pause ? "rx/tx" : "off");
} else {
- pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ netdev_info(phydev->attached_dev, "Link is Down\n");
}
}
EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
* If the @phydev driver has an ack_interrupt function, call it to
* ack and clear the phy device's interrupt.
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
* phy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
- * Description: Reads the status register and returns 0 either if
- * auto-negotiation is incomplete, or if there was an error.
- * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ * Description: Return the auto-negotiation status from this @phydev
+ * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
+ * is still pending.
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval = phy_read(phydev, MII_BMSR);
+ if (phydev->drv->aneg_done)
+ return phydev->drv->aneg_done(phydev);
- return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+ return genphy_aneg_done(phydev);
}
/* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
- cmd->port = PORT_MII;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->port = PORT_BNC;
+ else
+ cmd->port = PORT_MII;
cmd->phy_address = phydev->addr;
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 2f6989b1e0dc..0ce606624296 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
mutex_unlock(&phy_fixup_lock);
return err;
}
+ phydev->has_fixups = true;
}
}
mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
int phy_init_hw(struct phy_device *phydev)
{
- int ret;
+ int ret = 0;
if (!phydev->drv || !phydev->drv->config_init)
return 0;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
- if (ret < 0)
- return ret;
+ if (phydev->drv->soft_reset)
+ ret = phydev->drv->soft_reset(phydev);
+ else
+ ret = genphy_soft_reset(phydev);
- ret = phy_poll_reset(phydev);
if (ret < 0)
return ret;
@@ -864,6 +865,22 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+/**
+ * genphy_aneg_done - return auto-negotiation status
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the status register and returns 0 either if
+ * auto-negotiation is incomplete, or if there was an error.
+ * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ */
+int genphy_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+EXPORT_SYMBOL(genphy_aneg_done);
+
static int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -1029,6 +1046,27 @@ static int gen10g_read_status(struct phy_device *phydev)
return 0;
}
+/**
+ * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_poll_reset(phydev);
+}
+EXPORT_SYMBOL(genphy_soft_reset);
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
@@ -1075,6 +1113,12 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+ /* Do nothing for now */
+ return 0;
+}
+
static int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
@@ -1249,9 +1293,11 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
+ .soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
.features = 0,
.config_aneg = genphy_config_aneg,
+ .aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1260,6 +1306,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
+ .soft_reset = gen10g_soft_reset,
.config_init = gen10g_config_init,
.features = 0,
.config_aneg = gen10g_config_aneg,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 72ff14b811c6..e3923ebb693f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -143,9 +143,8 @@ struct ppp {
struct sk_buff_head mrq; /* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
- struct sock_filter *pass_filter; /* filter for packets to pass */
- struct sock_filter *active_filter;/* filter for pkts to reset idle */
- unsigned pass_len, active_len;
+ struct sk_filter *pass_filter; /* filter for packets to pass */
+ struct sk_filter *active_filter;/* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
struct ppp_link_stats stats64; /* 64 bit network stats */
@@ -755,28 +754,42 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPPIOCSPASS:
{
struct sock_filter *code;
+
err = get_filter(argp, &code);
if (err >= 0) {
+ struct sock_fprog fprog = {
+ .len = err,
+ .filter = code,
+ };
+
ppp_lock(ppp);
- kfree(ppp->pass_filter);
- ppp->pass_filter = code;
- ppp->pass_len = err;
+ if (ppp->pass_filter)
+ sk_unattached_filter_destroy(ppp->pass_filter);
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ kfree(code);
ppp_unlock(ppp);
- err = 0;
}
break;
}
case PPPIOCSACTIVE:
{
struct sock_filter *code;
+
err = get_filter(argp, &code);
if (err >= 0) {
+ struct sock_fprog fprog = {
+ .len = err,
+ .filter = code,
+ };
+
ppp_lock(ppp);
- kfree(ppp->active_filter);
- ppp->active_filter = code;
- ppp->active_len = err;
+ if (ppp->active_filter)
+ sk_unattached_filter_destroy(ppp->active_filter);
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ kfree(code);
ppp_unlock(ppp);
- err = 0;
}
break;
}
@@ -1184,7 +1197,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter) == 0) {
+ SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: outbound frame "
@@ -1194,7 +1207,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter) == 0))
+ SK_RUN_FILTER(ppp->active_filter, skb) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
@@ -1818,7 +1831,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter) == 0) {
+ SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
"PPP: inbound frame "
@@ -1827,7 +1840,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
return;
}
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter) == 0))
+ SK_RUN_FILTER(ppp->active_filter, skb) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
@@ -2672,6 +2685,10 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ppp->minseq = -1;
skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ ppp->pass_filter = NULL;
+ ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
/*
* drum roll: don't forget to set
@@ -2802,10 +2819,15 @@ static void ppp_destroy_interface(struct ppp *ppp)
skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
- kfree(ppp->pass_filter);
- ppp->pass_filter = NULL;
- kfree(ppp->active_filter);
- ppp->active_filter = NULL;
+ if (ppp->pass_filter) {
+ sk_unattached_filter_destroy(ppp->pass_filter);
+ ppp->pass_filter = NULL;
+ }
+
+ if (ppp->active_filter) {
+ sk_unattached_filter_destroy(ppp->active_filter);
+ ppp->active_filter = NULL;
+ }
#endif /* CONFIG_PPP_FILTER */
kfree_skb(ppp->xmit_pending);
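
The conversion above hands PPP's pass/active filters to the unattached socket-filter API, so the BPF core validates the program and can JIT it instead of PPP keeping raw sock_filter arrays. A minimal sketch of the same create/run/destroy pattern outside PPP, assuming a trivial accept-all classic BPF program and a caller that serializes init and exit:

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

static struct sk_filter *demo_filter;

static int demo_filter_init(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept every packet */
	};
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};

	return sk_unattached_filter_create(&demo_filter, &fprog);
}

static bool demo_filter_pass(struct sk_buff *skb)
{
	/* Nonzero means "accept", mirroring the checks in ppp_send_frame(). */
	return SK_RUN_FILTER(demo_filter, skb) != 0;
}

static void demo_filter_exit(void)
{
	sk_unattached_filter_destroy(demo_filter);
}
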
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c8624a8235ab..33008c1d1d67 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
struct netpoll *np;
int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
if (!team->dev->npinfo)
return 0;
- np = kzalloc(sizeof(*np), gfp);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
- err = __netpoll_setup(np, port->dev, gfp);
+ err = __netpoll_setup(np, port->dev);
if (err) {
kfree(np);
return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
- gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
return 0;
}
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+ err = team_port_enable_netpoll(team, port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1540,16 +1538,10 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct team_pcpu_stats *team_stats;
- team_stats = per_cpu_ptr(team->pcpu_stats, i);
- u64_stats_init(&team_stats->syncp);
- }
-
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
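
netdev_alloc_pcpu_stats() collapses the removed alloc_percpu() plus per-CPU u64_stats_init() loop into one helper; the only requirement is that the stats type has a u64_stats_sync member named syncp. A sketch of the open-coded equivalent, using a hypothetical stats structure for illustration:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Roughly what netdev_alloc_pcpu_stats(struct demo_pcpu_stats) expands to. */
static struct demo_pcpu_stats __percpu *demo_alloc_stats(void)
{
	struct demo_pcpu_stats __percpu *stats;
	int i;

	stats = alloc_percpu(struct demo_pcpu_stats);
	if (!stats)
		return NULL;

	for_each_possible_cpu(i) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, i);

		u64_stats_init(&s->syncp);
	}

	return stats;
}
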
@@ -1767,13 +1759,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_bh(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
tx_packets = p->tx_packets;
tx_bytes = p->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1856,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
}
static int team_netpoll_setup(struct net_device *dev,
- struct netpoll_info *npifo, gfp_t gfp)
+ struct netpoll_info *npifo)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
@@ -1864,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port, gfp);
+ err = team_port_enable_netpoll(team, port);
if (err) {
__team_netpoll_cleanup(team);
break;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index d671fc3ac5ac..dbde3412ee5e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
struct lb_stats tmp;
do {
- start = u64_stats_fetch_begin_bh(syncp);
+ start = u64_stats_fetch_begin_irq(syncp);
tmp.tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(syncp, start));
+ } while (u64_stats_fetch_retry_irq(syncp, start));
acc_stats->tx_bytes += tmp.tx_bytes;
}
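
Both team hunks above only touch the reader side of the u64_stats seqcount: the _irq variants cover writers that may run with interrupts disabled (netpoll being the usual suspect in this series), which the _bh helpers do not guarantee on 32-bit. A minimal sketch of the writer/reader pairing these loops assume, with a hypothetical stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer side (e.g. a per-CPU xmit counter), unchanged by this patch. */
static void demo_stats_add(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, using the irq-safe fetch helpers as in the hunks above. */
static void demo_stats_read(const struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->tx_packets;
		*bytes = s->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
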
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 26f8635b027d..ee328ba101e7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -452,7 +452,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
--tun->numqueues;
if (clean) {
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
sock_put(&tfile->sk);
} else
tun_disable_queue(tun, tfile);
@@ -499,12 +499,12 @@ static void tun_detach_all(struct net_device *dev)
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
wake_up_all(&tfile->wq.wait);
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
wake_up_all(&tfile->wq.wait);
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
@@ -2194,7 +2194,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
&tun_proto);
if (!tfile)
return -ENOMEM;
- rcu_assign_pointer(tfile->tun, NULL);
+ RCU_INIT_POINTER(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
tfile->ifindex = 0;
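
The tun changes replace rcu_assign_pointer(p, NULL) with RCU_INIT_POINTER(p, NULL): publishing NULL (or any pointer readers cannot yet reach) needs no write barrier, so the cheaper initializer is sufficient. A minimal sketch of the split, with a hypothetical RCU-protected pointer:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_state;

struct demo_ctx {
	struct demo_state __rcu *state;
};

/* Publishing real data keeps the barrier in rcu_assign_pointer(). */
static void demo_publish(struct demo_ctx *ctx, struct demo_state *new)
{
	rcu_assign_pointer(ctx->state, new);
}

/* Clearing to NULL can use RCU_INIT_POINTER(), as the tun hunks above do. */
static void demo_clear(struct demo_ctx *ctx)
{
	RCU_INIT_POINTER(ctx->state, NULL);
}

/* Readers are unaffected by the change. */
static bool demo_active(struct demo_ctx *ctx)
{
	bool active;

	rcu_read_lock();
	active = rcu_dereference(ctx->state) != NULL;
	rcu_read_unlock();

	return active;
}
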
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index bd363b27e854..9ea4bfe5d318 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -625,6 +625,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Novatel Expedite E371 - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9011, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d350d2795e10..549dbac710ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -73,6 +73,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
u8 iface_no;
int err;
int eth_hlen;
+ u16 mbim_mtu;
u16 ntb_fmt_supported;
__le16 max_datagram_size;
@@ -252,6 +253,14 @@ out:
/* set MTU to max supported by the device if necessary */
if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+
+ /* do not exceed the operator-preferred MTU */
+ if (ctx->mbim_extended_desc) {
+ mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+ if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+ dev->net->mtu = mbim_mtu;
+ }
+
return 0;
}
@@ -390,6 +399,14 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
break;
+ case USB_CDC_MBIM_EXTENDED_TYPE:
+ if (buf[0] < sizeof(*(ctx->mbim_extended_desc)))
+ break;
+
+ ctx->mbim_extended_desc =
+ (const struct usb_cdc_mbim_extended_desc *)buf;
+ break;
+
default:
break;
}
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index acfcc32b323d..8f37efd2d2fb 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -210,7 +210,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* (0x86dd) so Linux can understand it.
*/
if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60)
- ethhdr->h_proto = __constant_htons(ETH_P_IPV6);
+ ethhdr->h_proto = htons(ETH_P_IPV6);
}
if (count) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 313cb6cd4848..e3458e3c44f1 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -500,6 +500,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Novatel Expedite E371 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9011,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Dell Wireless 5800 (Novatel E362) */
USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
USB_CLASS_COMM,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index adb12f349a61..18e12a3f7fc3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -21,9 +21,10 @@
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
/* Version Information */
-#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
+#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -59,9 +60,11 @@
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
-#define PLA_RSTTELLY 0xe800
+#define PLA_RSTTALLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
+#define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */
+#define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */
#define PLA_CONFIG5 0xe822
#define PLA_PHY_PWR 0xe84c
#define PLA_OOB_CTRL 0xe84f
@@ -69,7 +72,7 @@
#define PLA_MISC_0 0xe858
#define PLA_MISC_1 0xe85a
#define PLA_OCP_GPHY_BASE 0xe86c
-#define PLA_TELLYCNT 0xe890
+#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
#define PLA_BP_BA 0xfc26
@@ -177,6 +180,9 @@
/* PLA_TCR1 */
#define VERSION_MASK 0x7cf0
+/* PLA_RSTTALLY */
+#define TALLY_RESET 0x0001
+
/* PLA_CR */
#define CR_RST 0x10
#define CR_RE 0x08
@@ -216,7 +222,14 @@
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
+/* PLA_CONFIG34 */
+#define LINK_ON_WAKE_EN 0x0010
+#define LINK_OFF_WAKE_EN 0x0008
+
/* PLA_CONFIG5 */
+#define BWF_EN 0x0040
+#define MWF_EN 0x0020
+#define UWF_EN 0x0010
#define LAN_WAKE_EN 0x0002
/* PLA_LED_FEATURE */
@@ -436,6 +449,9 @@ enum rtl8152_flags {
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
+ SELECTIVE_SUSPEND,
+ PHY_RESET,
+ SCHEDULE_TASKLET,
};
/* Define these values to match your device */
@@ -449,11 +465,40 @@ enum rtl8152_flags {
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
+#define REALTEK_USB_DEVICE(vend, prod) \
+ USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
+
+struct tally_counter {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underun;
+};
+
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
+
__le32 opts2;
+#define RD_UDP_CS (1 << 23)
+#define RD_TCP_CS (1 << 22)
+#define RD_IPV6_CS (1 << 20)
+#define RD_IPV4_CS (1 << 19)
+
__le32 opts3;
+#define IPF (1 << 23) /* IP checksum fail */
+#define UDPF (1 << 22) /* UDP checksum fail */
+#define TCPF (1 << 21) /* TCP checksum fail */
+
__le32 opts4;
__le32 opts5;
__le32 opts6;
@@ -463,13 +508,21 @@ struct tx_desc {
__le32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0x3ffff
+#define GTSENDV4 (1 << 28)
+#define GTSENDV6 (1 << 27)
+#define GTTCPHO_SHIFT 18
+#define GTTCPHO_MAX 0x7fU
+#define TX_LEN_MAX 0x3ffffU
__le32 opts2;
#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+#define MSS_SHIFT 17
+#define MSS_MAX 0x7ffU
+#define TCPHO_SHIFT 17
+#define TCPHO_MAX 0x7ffU
};
struct r8152;
@@ -511,11 +564,13 @@ struct r8152 {
void (*init)(struct r8152 *);
int (*enable)(struct r8152 *);
void (*disable)(struct r8152 *);
+ void (*up)(struct r8152 *);
void (*down)(struct r8152 *);
void (*unload)(struct r8152 *);
} rtl_ops;
int intr_interval;
+ u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u16 ocp_base;
@@ -534,12 +589,21 @@ enum rtl_version {
RTL_VER_MAX
};
+enum tx_csum_stat {
+ TX_CSUM_SUCCESS = 0,
+ TX_CSUM_TSO,
+ TX_CSUM_NONE
+};
+
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
static unsigned int rx_buf_sz = 16384;
+#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \
+ VLAN_ETH_HLEN - VLAN_HLEN)
+
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@@ -577,6 +641,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
value, index, tmp, size, 500);
kfree(tmp);
+
return ret;
}
@@ -862,11 +927,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
+ int ret;
if (phy_id != R8152_PHY_ID)
return -EINVAL;
- return r8152_mdio_read(tp, reg);
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = r8152_mdio_read(tp, reg);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static
@@ -877,7 +952,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R8152_PHY_ID)
return;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
r8152_mdio_write(tp, reg, val);
+
+ usb_autopm_put_interface(tp->intf);
}
static
@@ -886,11 +966,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
+ int ret;
u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
+ if (tp->version == RTL_VER_01)
+ ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
+ else
+ ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
+
+ if (ret < 0) {
netif_notice(tp, probe, dev, "inet addr fail\n");
- else {
+ } else {
+ if (tp->version != RTL_VER_01) {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_CONFIG);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
+ sizeof(node_id), node_id);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_NORAML);
+ }
+
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
@@ -913,15 +1008,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
static void read_bulk_callback(struct urb *urb)
{
struct net_device *netdev;
- unsigned long flags;
int status = urb->status;
struct rx_agg *agg;
struct r8152 *tp;
@@ -948,14 +1037,16 @@ static void read_bulk_callback(struct urb *urb)
if (!netif_carrier_ok(netdev))
return;
+ usb_mark_last_busy(tp->udev);
+
switch (status) {
case 0:
if (urb->actual_length < ETH_ZLEN)
break;
- spin_lock_irqsave(&tp->rx_lock, flags);
+ spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ spin_unlock(&tp->rx_lock);
tasklet_schedule(&tp->tl);
return;
case -ESHUTDOWN:
@@ -978,9 +1069,9 @@ static void read_bulk_callback(struct urb *urb)
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- spin_lock_irqsave(&tp->rx_lock, flags);
+ spin_lock(&tp->rx_lock);
list_add_tail(&agg->list, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ spin_unlock(&tp->rx_lock);
tasklet_schedule(&tp->tl);
}
}
@@ -988,7 +1079,7 @@ static void read_bulk_callback(struct urb *urb)
static void write_bulk_callback(struct urb *urb)
{
struct net_device_stats *stats;
- unsigned long flags;
+ struct net_device *netdev;
struct tx_agg *agg;
struct r8152 *tp;
int status = urb->status;
@@ -1001,21 +1092,24 @@ static void write_bulk_callback(struct urb *urb)
if (!tp)
return;
- stats = rtl8152_get_stats(tp->netdev);
+ netdev = tp->netdev;
+ stats = &netdev->stats;
if (status) {
if (net_ratelimit())
- netdev_warn(tp->netdev, "Tx status %d\n", status);
+ netdev_warn(netdev, "Tx status %d\n", status);
stats->tx_errors += agg->skb_num;
} else {
stats->tx_packets += agg->skb_num;
stats->tx_bytes += agg->skb_len;
}
- spin_lock_irqsave(&tp->tx_lock, flags);
+ spin_lock(&tp->tx_lock);
list_add_tail(&agg->list, &tp->tx_free);
- spin_unlock_irqrestore(&tp->tx_lock, flags);
+ spin_unlock(&tp->tx_lock);
- if (!netif_carrier_ok(tp->netdev))
+ usb_autopm_put_interface_async(tp->intf);
+
+ if (!netif_carrier_ok(netdev))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1220,6 +1314,9 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
struct tx_agg *agg = NULL;
unsigned long flags;
+ if (list_empty(&tp->tx_free))
+ return NULL;
+
spin_lock_irqsave(&tp->tx_lock, flags);
if (!list_empty(&tp->tx_free)) {
struct list_head *cursor;
@@ -1233,24 +1330,138 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
return agg;
}
-static void
-r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+ __be16 protocol;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ return protocol;
+}
+
+/*
+ * r8152_csum_workaround()
+ * The hardware limits the value of the transport offset. When the offset is
+ * out of range, calculate the checksum in software instead.
+ */
+static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ if (skb_shinfo(skb)->gso_size) {
+ netdev_features_t features = tp->netdev->features;
+ struct sk_buff_head seg_list;
+ struct sk_buff *segs, *nskb;
+
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ segs = skb_gso_segment(skb, features);
+ if (IS_ERR(segs) || !segs)
+ goto drop;
+
+ __skb_queue_head_init(&seg_list);
+
+ do {
+ nskb = segs;
+ segs = segs->next;
+ nskb->next = NULL;
+ __skb_queue_tail(&seg_list, nskb);
+ } while (segs);
+
+ skb_queue_splice(&seg_list, list);
+ dev_kfree_skb(skb);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_help(skb) < 0)
+ goto drop;
+
+ __skb_queue_head(list, skb);
+ } else {
+ struct net_device_stats *stats;
+
+drop:
+ stats = &tp->netdev->stats;
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ }
+}
+
+/*
+ * msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP pseudo header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ int ret;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ ipv6h = ipv6_hdr(skb);
+ th = tcp_hdr(skb);
+
+ th->check = 0;
+ th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+ return ret;
+}
+
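
msdn_giant_send_check() above seeds th->check with the IPv6 pseudo-header checksum computed over a zero length, which the hardware then completes per generated segment. For reference only, a sketch of the same seed written against csum_ipv6_magic() directly (tcp_v6_check() is a thin wrapper around it); this assumes the IPv6 and TCP headers are already set up, as they are at this point in the transmit path:

#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>

static void demo_seed_ipv6_tso_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* Pseudo-header sum with a zero length field, matching what the
	 * driver programs before handing the frame to the NIC for TSO.
	 */
	th->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 0,
				     IPPROTO_TCP, 0);
}
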
+static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
+ struct sk_buff *skb, u32 len, u32 transport_offset)
{
- memset(desc, 0, sizeof(*desc));
+ u32 mss = skb_shinfo(skb)->gso_size;
+ u32 opts1, opts2 = 0;
+ int ret = TX_CSUM_SUCCESS;
+
+ WARN_ON_ONCE(len > TX_LEN_MAX);
+
+ opts1 = len | TX_FS | TX_LS;
+
+ if (mss) {
+ if (transport_offset > GTTCPHO_MAX) {
+ netif_warn(tp, tx_err, tp->netdev,
+ "Invalid transport offset 0x%x for TSO\n",
+ transport_offset);
+ ret = TX_CSUM_TSO;
+ goto unavailable;
+ }
- desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+ switch (get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ opts1 |= GTSENDV4;
+ break;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- __be16 protocol;
+ case htons(ETH_P_IPV6):
+ if (msdn_giant_send_check(skb)) {
+ ret = TX_CSUM_TSO;
+ goto unavailable;
+ }
+ opts1 |= GTSENDV6;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ opts1 |= transport_offset << GTTCPHO_SHIFT;
+ opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
- u32 opts2 = 0;
- if (skb->protocol == htons(ETH_P_8021Q))
- protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
+ if (transport_offset > TCPHO_MAX) {
+ netif_warn(tp, tx_err, tp->netdev,
+ "Invalid transport offset 0x%x\n",
+ transport_offset);
+ ret = TX_CSUM_NONE;
+ goto unavailable;
+ }
- switch (protocol) {
+ switch (get_protocol(skb)) {
case htons(ETH_P_IP):
opts2 |= IPV4_CS;
ip_protocol = ip_hdr(skb)->protocol;
@@ -1266,24 +1477,34 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
break;
}
- if (ip_protocol == IPPROTO_TCP) {
+ if (ip_protocol == IPPROTO_TCP)
opts2 |= TCP_CS;
- opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
- } else if (ip_protocol == IPPROTO_UDP) {
+ else if (ip_protocol == IPPROTO_UDP)
opts2 |= UDP_CS;
- } else {
+ else
WARN_ON_ONCE(1);
- }
- desc->opts2 = cpu_to_le32(opts2);
+ opts2 |= transport_offset << TCPHO_SHIFT;
}
+
+ desc->opts2 = cpu_to_le32(opts2);
+ desc->opts1 = cpu_to_le32(opts1);
+
+unavailable:
+ return ret;
}
static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
{
- int remain;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ int remain, ret;
u8 *tx_data;
+ __skb_queue_head_init(&skb_head);
+ spin_lock(&tx_queue->lock);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock(&tx_queue->lock);
+
tx_data = agg->head;
agg->skb_num = agg->skb_len = 0;
remain = rx_buf_sz;
@@ -1292,32 +1513,56 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int len;
+ u32 offset;
- skb = skb_dequeue(&tp->tx_queue);
+ skb = __skb_dequeue(&skb_head);
if (!skb)
break;
- remain -= sizeof(*tx_desc);
- len = skb->len;
- if (remain < len) {
- skb_queue_head(&tp->tx_queue, skb);
+ len = skb->len + sizeof(*tx_desc);
+
+ if (len > remain) {
+ __skb_queue_head(&skb_head, skb);
break;
}
tx_data = tx_agg_align(tx_data);
tx_desc = (struct tx_desc *)tx_data;
+
+ offset = (u32)skb_transport_offset(skb);
+
+ if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+ r8152_csum_workaround(tp, skb, &skb_head);
+ continue;
+ }
+
tx_data += sizeof(*tx_desc);
- r8152_tx_csum(tp, tx_desc, skb);
- memcpy(tx_data, skb->data, len);
- agg->skb_num++;
+ len = skb->len;
+ if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+ struct net_device_stats *stats = &tp->netdev->stats;
+
+ stats->tx_dropped++;
+ dev_kfree_skb_any(skb);
+ tx_data -= sizeof(*tx_desc);
+ continue;
+ }
+
+ tx_data += len;
agg->skb_len += len;
+ agg->skb_num++;
+
dev_kfree_skb_any(skb);
- tx_data += len;
remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
}
+ if (!skb_queue_empty(&skb_head)) {
+ spin_lock(&tx_queue->lock);
+ skb_queue_splice(&skb_head, tx_queue);
+ spin_unlock(&tx_queue->lock);
+ }
+
netif_tx_lock(tp->netdev);
if (netif_queue_stopped(tp->netdev) &&
@@ -1326,20 +1571,67 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
netif_tx_unlock(tp->netdev);
+ ret = usb_autopm_get_interface_async(tp->intf);
+ if (ret < 0)
+ goto out_tx_fill;
+
usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
- return usb_submit_urb(agg->urb, GFP_ATOMIC);
+ ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
+ if (ret < 0)
+ usb_autopm_put_interface_async(tp->intf);
+
+out_tx_fill:
+ return ret;
+}
+
+static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+{
+ u8 checksum = CHECKSUM_NONE;
+ u32 opts2, opts3;
+
+ if (tp->version == RTL_VER_01)
+ goto return_result;
+
+ opts2 = le32_to_cpu(rx_desc->opts2);
+ opts3 = le32_to_cpu(rx_desc->opts3);
+
+ if (opts2 & RD_IPV4_CS) {
+ if (opts3 & IPF)
+ checksum = CHECKSUM_NONE;
+ else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
+ checksum = CHECKSUM_NONE;
+ else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
+ checksum = CHECKSUM_NONE;
+ else
+ checksum = CHECKSUM_UNNECESSARY;
+ } else if (opts2 & RD_IPV6_CS) {
+ if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
+ checksum = CHECKSUM_UNNECESSARY;
+ else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
+ checksum = CHECKSUM_UNNECESSARY;
+ }
+
+return_result:
+ return checksum;
}
static void rx_bottom(struct r8152 *tp)
{
unsigned long flags;
- struct list_head *cursor, *next;
+ struct list_head *cursor, *next, rx_queue;
+ if (list_empty(&tp->rx_done))
+ return;
+
+ INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
- list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_splice_init(&tp->rx_done, &rx_queue);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
struct rx_agg *agg;
int len_used = 0;
@@ -1348,7 +1640,6 @@ static void rx_bottom(struct r8152 *tp)
int ret;
list_del_init(cursor);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
agg = list_entry(cursor, struct rx_agg, list);
urb = agg->urb;
@@ -1361,7 +1652,7 @@ static void rx_bottom(struct r8152 *tp)
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
- struct net_device_stats *stats;
+ struct net_device_stats *stats = &netdev->stats;
unsigned int pkt_len;
struct sk_buff *skb;
@@ -1373,23 +1664,24 @@ static void rx_bottom(struct r8152 *tp)
if (urb->actual_length < len_used)
break;
- stats = rtl8152_get_stats(netdev);
-
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
if (!skb) {
stats->rx_dropped++;
- break;
+ goto find_next_rx;
}
+
+ skb->ip_summed = r8152_rx_csum(tp, rx_desc);
memcpy(skb->data, rx_data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
+find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
rx_desc = (struct rx_desc *)rx_data;
len_used = (int)(rx_data - (u8 *)agg->head);
@@ -1398,13 +1690,13 @@ static void rx_bottom(struct r8152 *tp)
submit:
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- spin_lock_irqsave(&tp->rx_lock, flags);
if (ret && ret != -ENODEV) {
- list_add_tail(&agg->list, next);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
tasklet_schedule(&tp->tl);
}
}
- spin_unlock_irqrestore(&tp->rx_lock, flags);
}
static void tx_bottom(struct r8152 *tp)
@@ -1423,19 +1715,18 @@ static void tx_bottom(struct r8152 *tp)
res = r8152_tx_agg_fill(tp, agg);
if (res) {
- struct net_device_stats *stats;
- struct net_device *netdev;
- unsigned long flags;
-
- netdev = tp->netdev;
- stats = rtl8152_get_stats(netdev);
+ struct net_device *netdev = tp->netdev;
if (res == -ENODEV) {
netif_device_detach(netdev);
} else {
+ struct net_device_stats *stats = &netdev->stats;
+ unsigned long flags;
+
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
stats->tx_dropped += agg->skb_num;
+
spin_lock_irqsave(&tp->tx_lock, flags);
list_add_tail(&agg->list, &tp->tx_free);
spin_unlock_irqrestore(&tp->tx_lock, flags);
@@ -1475,6 +1766,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return usb_submit_urb(agg->urb, mem_flags);
}
+static void rtl_drop_queued_tx(struct r8152 *tp)
+{
+ struct net_device_stats *stats = &tp->netdev->stats;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(tx_queue))
+ return;
+
+ __skb_queue_head_init(&skb_head);
+ spin_lock_bh(&tx_queue->lock);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_bh(&tx_queue->lock);
+
+ while ((skb = __skb_dequeue(&skb_head))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+}
+
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1538,7 +1849,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1546,13 +1857,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
skb_queue_tail(&tp->tx_queue, skb);
- if (list_empty(&tp->tx_free) &&
- skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+ if (!list_empty(&tp->tx_free)) {
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ set_bit(SCHEDULE_TASKLET, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ } else {
+ usb_mark_last_busy(tp->udev);
+ tasklet_schedule(&tp->tl);
+ }
+ } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
netif_stop_queue(netdev);
- if (!list_empty(&tp->tx_free))
- tasklet_schedule(&tp->tl);
-
return NETDEV_TX_OK;
}
@@ -1610,6 +1925,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
}
}
+static void rxdy_gated_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ if (enable)
+ ocp_data |= RXDY_GATED_EN;
+ else
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
@@ -1621,9 +1948,7 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
INIT_LIST_HEAD(&tp->rx_done);
ret = 0;
@@ -1678,8 +2003,6 @@ static int rtl8153_enable(struct r8152 *tp)
static void rtl8152_disable(struct r8152 *tp)
{
- struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
- struct sk_buff *skb;
u32 ocp_data;
int i;
@@ -1687,17 +2010,12 @@ static void rtl8152_disable(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- while ((skb = skb_dequeue(&tp->tx_queue))) {
- dev_kfree_skb(skb);
- stats->tx_dropped++;
- }
+ rtl_drop_queued_tx(tp);
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_kill_urb(tp->tx_info[i].urb);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1718,18 +2036,209 @@ static void rtl8152_disable(struct r8152 *tp)
rtl8152_nic_reset(tp);
}
+static void r8152_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ if (enable)
+ ocp_data |= POWER_CUT;
+ else
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl_get_wol(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u32 wolopts = 0;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (!(ocp_data & LAN_WAKE_EN))
+ return 0;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ if (ocp_data & LINK_ON_WAKE_EN)
+ wolopts |= WAKE_PHY;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (ocp_data & UWF_EN)
+ wolopts |= WAKE_UCAST;
+ if (ocp_data & BWF_EN)
+ wolopts |= WAKE_BCAST;
+ if (ocp_data & MWF_EN)
+ wolopts |= WAKE_MCAST;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ if (ocp_data & MAGIC_EN)
+ wolopts |= WAKE_MAGIC;
+
+ return wolopts;
+}
+
+static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+{
+ u32 ocp_data;
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_ON_WAKE_EN;
+ if (wolopts & WAKE_PHY)
+ ocp_data |= LINK_ON_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ if (wolopts & WAKE_UCAST)
+ ocp_data |= UWF_EN;
+ if (wolopts & WAKE_BCAST)
+ ocp_data |= BWF_EN;
+ if (wolopts & WAKE_MCAST)
+ ocp_data |= MWF_EN;
+ if (wolopts & WAKE_ANY)
+ ocp_data |= LAN_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data &= ~MAGIC_EN;
+ if (wolopts & WAKE_MAGIC)
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ if (wolopts & WAKE_ANY)
+ device_set_wakeup_enable(&tp->udev->dev, true);
+ else
+ device_set_wakeup_enable(&tp->udev->dev, false);
+}
+
+static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ u32 ocp_data;
+
+ __rtl_set_wol(tp, WAKE_ANY);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ } else {
+ __rtl_set_wol(tp, tp->saved_wolopts);
+ }
+}
+
+static void rtl_phy_reset(struct r8152 *tp)
+{
+ u16 data;
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+
+ /* don't reset again before the previous reset completes */
+ if (data & BMCR_RESET)
+ return;
+
+ data |= BMCR_RESET;
+ r8152_mdio_write(tp, MII_BMCR, data);
+
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+ mdelay(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+ msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+ LINKENA | DIS_SDSAVE);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+ u16 data;
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8152b_disable_aldps(tp);
+
+ rtl_clear_bp(tp);
+
+ r8152b_enable_aldps(tp);
+ set_bit(PHY_RESET, &tp->flags);
+}
+
static void r8152b_exit_oob(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+ r8152b_hw_phy_cfg(tp);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -1835,10 +2344,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data |= CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1851,36 +2356,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8152b_disable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
- msleep(20);
-}
-
-static inline void r8152b_enable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
- LINKENA | DIS_SDSAVE);
-}
-
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8153_clear_bp(tp);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1916,9 +2411,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = sram_read(tp, SRAM_10M_AMP2);
data |= AMP_DN;
sram_write(tp, SRAM_10M_AMP2, data);
+
+ set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, int enable)
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -1930,7 +2427,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
}
-static void r8153_u2p3en(struct r8152 *tp, int enable)
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1942,7 +2439,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
}
-static void r8153_power_cut_en(struct r8152 *tp, int enable)
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1958,28 +2455,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153_teredo_off(struct r8152 *tp)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
int i;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
-
+ rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2072,10 +2553,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
ocp_data &= ~TEREDO_WAKE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2092,11 +2569,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2187,12 +2660,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (test_bit(PHY_RESET, &tp->flags))
+ bmcr |= BMCR_RESET;
+
if (tp->mii.supports_gmii)
r8152_mdio_write(tp, MII_CTRL1000, gbcr);
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
+ if (test_bit(PHY_RESET, &tp->flags)) {
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+ }
+
out:
return ret;
@@ -2200,12 +2687,7 @@ out:
static void rtl8152_down(struct r8152 *tp)
{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
+ r8152_power_cut_en(tp, false);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
r8152b_enable_aldps(tp);
@@ -2213,8 +2695,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
- r8153_u1u2en(tp, 0);
- r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, false);
+ r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
r8153_enable_aldps(tp);
@@ -2249,6 +2731,9 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
@@ -2261,8 +2746,17 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
+ if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+ (tp->speed & LINK_STATUS)) {
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
+ tasklet_schedule(&tp->tl);
+ }
+
+ if (test_bit(PHY_RESET, &tp->flags))
+ rtl_phy_reset(tp);
+
out1:
- return;
+ usb_autopm_put_interface(tp->intf);
}
static int rtl8152_open(struct net_device *netdev)
@@ -2270,6 +2764,27 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ res = alloc_all_mem(tp);
+ if (res)
+ goto out;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ free_all_mem(tp);
+ goto out;
+ }
+
+ /* The WORK_ENABLE may be set when autoresume occurs */
+ if (test_bit(WORK_ENABLE, &tp->flags)) {
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ }
+
+ tp->rtl_ops.up(tp);
+
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
@@ -2277,15 +2792,19 @@ static int rtl8152_open(struct net_device *netdev)
netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
+
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
+ free_all_mem(tp);
}
+ usb_autopm_put_interface(tp->intf);
+out:
return res;
}
@@ -2298,33 +2817,30 @@ static int rtl8152_close(struct net_device *netdev)
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
- tasklet_disable(&tp->tl);
- tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
- return res;
-}
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ rtl_drop_queued_tx(tp);
+ } else {
+ /*
+ * Autosuspend may have been enabled and would not have been
+ * disabled when autoresume occurred, because netif_running()
+ * was false at that time.
+ */
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ }
-static void rtl_clear_bp(struct r8152 *tp)
-{
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
- mdelay(3);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
- ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ usb_autopm_put_interface(tp->intf);
+ }
-static void r8153_clear_bp(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
- rtl_clear_bp(tp);
+ free_all_mem(tp);
+
+ return res;
}
static void r8152b_enable_eee(struct r8152 *tp)
@@ -2375,18 +2891,18 @@ static void r8152b_enable_fc(struct r8152 *tp)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
-static void r8152b_hw_phy_cfg(struct r8152 *tp)
+static void rtl_tally_reset(struct r8152 *tp)
{
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
- r8152b_disable_aldps(tp);
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY);
+ ocp_data |= TALLY_RESET;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
}
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
-
- rtl_clear_bp(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2394,17 +2910,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
}
- r8152b_hw_phy_cfg(tp);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-
- r8152b_exit_oob(tp);
+ r8152_power_cut_en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2420,14 +2926,7 @@ static void r8152b_init(struct r8152 *tp)
r8152b_enable_eee(tp);
r8152b_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
- for (i = 0; i < 100; i++) {
- udelay(100);
- if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
- break;
- }
+ rtl_tally_reset(tp);
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
@@ -2440,7 +2939,7 @@ static void r8153_init(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_u1u2en(tp, 0);
+ r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2456,14 +2955,12 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
- r8153_u2p3en(tp, 0);
+ r8153_u2p3en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
ocp_data &= ~TIMER11_EN;
ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
- r8153_clear_bp(tp);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2481,10 +2978,8 @@ static void r8153_init(struct r8152 *tp)
ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
- r8153_power_cut_en(tp, 0);
- r8153_u1u2en(tp, 1);
-
- r8153_first_init(tp);
+ r8153_power_cut_en(tp, false);
+ r8153_u1u2en(tp, true);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2499,26 +2994,31 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_eee(tp);
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
+ rtl_tally_reset(tp);
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
- netif_device_detach(tp->netdev);
+ if (PMSG_IS_AUTO(message))
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ else
+ netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
- tasklet_disable(&tp->tl);
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, true);
+ } else {
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ }
}
- tp->rtl_ops.down(tp);
-
return 0;
}
@@ -2526,22 +3026,77 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- tp->rtl_ops.init(tp);
- netif_device_attach(tp->netdev);
+ if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ tp->rtl_ops.init(tp);
+ netif_device_attach(tp->netdev);
+ }
+
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ } else {
+ tp->rtl_ops.up(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
+ }
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
- tasklet_enable(&tp->tl);
}
return 0;
}
+static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
+static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_set_wol;
+
+ __rtl_set_wol(tp, wol->wolopts);
+ tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+
+ usb_autopm_put_interface(tp->intf);
+
+out_set_wol:
+ return ret;
+}
+
+static u32 rtl8152_get_msglevel(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
static void rtl8152_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -2566,8 +3121,76 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
- return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
+}
+
+static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
+ "tx_packets",
+ "rx_packets",
+ "tx_errors",
+ "rx_errors",
+ "rx_missed",
+ "align_errors",
+ "tx_single_collisions",
+ "tx_multi_collisions",
+ "rx_unicast",
+ "rx_broadcast",
+ "rx_multicast",
+ "tx_aborted",
+ "tx_underrun",
+};
+
+static int rtl8152_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rtl8152_gstrings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rtl8152_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ struct tally_counter tally;
+
+ generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+
+ data[0] = le64_to_cpu(tally.tx_packets);
+ data[1] = le64_to_cpu(tally.rx_packets);
+ data[2] = le64_to_cpu(tally.tx_errors);
+ data[3] = le32_to_cpu(tally.rx_errors);
+ data[4] = le16_to_cpu(tally.rx_missed);
+ data[5] = le16_to_cpu(tally.align_errors);
+ data[6] = le32_to_cpu(tally.tx_one_collision);
+ data[7] = le32_to_cpu(tally.tx_multi_collision);
+ data[8] = le64_to_cpu(tally.rx_unicast);
+ data[9] = le64_to_cpu(tally.rx_broadcast);
+ data[10] = le32_to_cpu(tally.rx_multicast);
+ data[11] = le16_to_cpu(tally.tx_aborted);
+ data[12] = le16_to_cpu(tally.tx_underun);
+}
+
+static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ break;
+ }
}
static struct ethtool_ops ops = {
@@ -2575,13 +3198,24 @@ static struct ethtool_ops ops = {
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .get_msglevel = rtl8152_get_msglevel,
+ .set_msglevel = rtl8152_set_msglevel,
+ .get_wol = rtl8152_get_wol,
+ .set_wol = rtl8152_set_wol,
+ .get_strings = rtl8152_get_strings,
+ .get_sset_count = rtl8152_get_sset_count,
+ .get_ethtool_stats = rtl8152_get_ethtool_stats,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
struct r8152 *tp = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(rq);
- int res = 0;
+ int res;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0)
+ goto out;
switch (cmd) {
case SIOCGMIIPHY:
@@ -2604,6 +3238,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
res = -EOPNOTSUPP;
}
+ usb_autopm_put_interface(tp->intf);
+
+out:
return res;
}
@@ -2656,22 +3293,13 @@ static void r8152b_get_version(struct r8152 *tp)
static void rtl8152_unload(struct r8152 *tp)
{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+ if (tp->version != RTL_VER_01)
+ r8152_power_cut_en(tp, true);
}
static void rtl8153_unload(struct r8152 *tp)
{
- r8153_power_cut_en(tp, 1);
+ r8153_power_cut_en(tp, true);
}
static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2686,6 +3314,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8152b_exit_oob;
ops->down = rtl8152_down;
ops->unload = rtl8152_unload;
ret = 0;
@@ -2694,6 +3323,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2709,6 +3339,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2766,9 +3397,15 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features |= NETIF_F_IP_CSUM;
- netdev->hw_features = NETIF_F_IP_CSUM;
+ netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO6;
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_FRAGLIST |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
SET_ETHTOOL_OPS(netdev, &ops);
+ netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
@@ -2778,14 +3415,12 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id = R8152_PHY_ID;
tp->mii.supports_gmii = 0;
+ intf->needs_remote_wakeup = 1;
+
r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
- ret = alloc_all_mem(tp);
- if (ret)
- goto out;
-
usb_set_intfdata(intf, tp);
ret = register_netdev(netdev);
@@ -2794,6 +3429,12 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ tp->saved_wolopts = __rtl_get_wol(tp);
+ if (tp->saved_wolopts)
+ device_set_wakeup_enable(&udev->dev, true);
+ else
+ device_set_wakeup_enable(&udev->dev, false);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2815,7 +3456,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
- free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -2838,6 +3478,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
};
module_usb_driver(rtl8152_driver);
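
For reference, a minimal sketch (not part of the patch) of the runtime-PM bracketing the r8152 hunks above add around their ioctl and ethtool paths; the names my_priv and my_handler are hypothetical stand-ins, not driver code.

#include <linux/usb.h>
#include <linux/netdevice.h>

struct my_priv {
	struct usb_interface *intf;
};

static int my_handler(struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	int ret;

	/* Take a PM reference; this resumes the device if it is autosuspended. */
	ret = usb_autopm_get_interface(priv->intf);
	if (ret < 0)
		return ret;

	/* ... touch device registers here ... */

	/* Drop the reference so the USB core may autosuspend the device again. */
	usb_autopm_put_interface(priv->intf);
	return 0;
}

The .supports_autosuspend = 1 and intf->needs_remote_wakeup settings added above are what allow the core to actually suspend the device between such calls.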
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c0e7c64765ab..b4a10bcb66a0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
@@ -155,10 +156,10 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
- } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
result->packets += packets;
result->bytes += bytes;
}
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- int i;
-
- dev->vstats = alloc_percpu(struct pcpu_vstats);
+ dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_vstats *veth_stats;
- veth_stats = per_cpu_ptr(dev->vstats, i);
- u64_stats_init(&veth_stats->syncp);
- }
-
return 0;
}
@@ -336,10 +328,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = nla_parse(peer_tb, IFLA_MAX,
- nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg),
- ifla_policy);
+ err = rtnl_nla_parse_ifla(peer_tb,
+ nla_data(nla_peer) + sizeof(struct ifinfomsg),
+ nla_len(nla_peer) - sizeof(struct ifinfomsg));
if (err < 0)
return err;
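
A compact sketch of the per-CPU stats pattern the veth change above switches to; my_pcpu_stats, my_priv and the helper functions are hypothetical illustrations only.

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct my_priv {
	struct my_pcpu_stats __percpu *stats;
};

static int my_dev_init(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Allocates the per-CPU area and runs u64_stats_init() on every
	 * instance, replacing the open-coded for_each_possible_cpu() loop. */
	priv->stats = netdev_alloc_pcpu_stats(struct my_pcpu_stats);
	return priv->stats ? 0 : -ENOMEM;
}

static void my_read_one_cpu(struct my_pcpu_stats *stats,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* The _irq fetch variants pair with writers that may run in
	 * (hard)irq context, which is why the readers above are converted. */
	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}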
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 470b01f3e7b4..7b687469199b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -882,7 +882,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_warn(&dev->dev,
"Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
virtqueue_kick(sq->vq);
@@ -1000,16 +1000,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0fa3b44f7342..97394345e5dd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1078,7 +1078,7 @@ unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2738,47 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
/*
* Enable MSIx vectors.
* Returns :
- * 0 on successful enabling of required vectors,
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
- * could be enabled.
- * number of vectors which can be enabled otherwise (this number is smaller
+ * were enabled.
+ * number of vectors which were enabled otherwise (this number is greater
* than VMXNET3_LINUX_MIN_MSIX_VECT)
*/
static int
-vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
- int vectors)
-{
- int err = 0, vector_threshold;
- vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
-
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- vectors);
- if (!err) {
- adapter->intr.num_intrs = vectors;
- return 0;
- } else if (err < 0) {
- dev_err(&adapter->netdev->dev,
- "Failed to enable MSI-X, error: %d\n", err);
- vectors = 0;
- } else if (err < vector_threshold) {
- break;
- } else {
- /* If fails to enable required number of MSI-x vectors
- * try enabling minimum number of vectors required.
- */
- dev_err(&adapter->netdev->dev,
- "Failed to enable %d MSI-X, trying %d instead\n",
- vectors, vector_threshold);
- vectors = vector_threshold;
- }
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+ int ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries, nvec, nvec);
+
+ if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d\n",
+ nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries,
+ VMXNET3_LINUX_MIN_MSIX_VECT,
+ VMXNET3_LINUX_MIN_MSIX_VECT);
}
- dev_info(&adapter->pdev->dev,
- "Number of MSI-X interrupts which can be allocated "
- "is lower than min threshold required.\n");
- return err;
+ if (ret < 0) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable MSI-X, error: %d\n", ret);
+ }
+
+ return ret;
}
@@ -2805,56 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int vector, err = 0;
-
- adapter->intr.num_intrs = (adapter->share_intr ==
- VMXNET3_INTR_TXSHARE) ? 1 :
- adapter->num_tx_queues;
- adapter->intr.num_intrs += (adapter->share_intr ==
- VMXNET3_INTR_BUDDYSHARE) ? 0 :
- adapter->num_rx_queues;
- adapter->intr.num_intrs += 1; /* for link event */
-
- adapter->intr.num_intrs = (adapter->intr.num_intrs >
- VMXNET3_LINUX_MIN_MSIX_VECT
- ? adapter->intr.num_intrs :
- VMXNET3_LINUX_MIN_MSIX_VECT);
-
- for (vector = 0; vector < adapter->intr.num_intrs; vector++)
- adapter->intr.msix_entries[vector].entry = vector;
-
- err = vmxnet3_acquire_msix_vectors(adapter,
- adapter->intr.num_intrs);
+ int i, nvec;
+
+ nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+ 0 : adapter->num_rx_queues;
+ nvec += 1; /* for link event */
+ nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ for (i = 0; i < nvec; i++)
+ adapter->intr.msix_entries[i].entry = i;
+
+ nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec < 0)
+ goto msix_err;
+
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
netdev_err(adapter->netdev,
"Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
- adapter->intr.num_intrs =
- VMXNET3_LINUX_MIN_MSIX_VECT;
}
- return;
}
- if (!err)
- return;
+ adapter->intr.num_intrs = nvec;
+ return;
+
+msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", err);
+ "Limiting #rx queues to 1, try MSI.\n", nvec);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
- int err;
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
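
A hedged sketch of the pci_enable_msix_range() fallback logic the vmxnet3 conversion above implements; my_enable_msix and its parameters are illustrative only.

#include <linux/pci.h>

static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			  int want, int min)
{
	int ret;

	/* minvec == maxvec makes this an all-or-nothing request */
	ret = pci_enable_msix_range(pdev, entries, want, want);
	if (ret == -ENOSPC && want > min)
		/* Not enough vectors; retry with the minimum we can live with. */
		ret = pci_enable_msix_range(pdev, entries, min, min);

	/* Returns the number of vectors granted, or a negative errno. */
	return ret;
}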
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1236812c7be6..0d862a5077ab 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1132,7 +1132,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
- __be16 port;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1150,8 +1149,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
- port = inet_sk(sk)->inet_sport;
-
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
@@ -2080,19 +2077,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- int i;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vxlan_stats;
- vxlan_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&vxlan_stats->syncp);
- }
-
-
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 48896138418f..a9970f1af976 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -374,8 +374,7 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_cow_head(skb, 0))
goto drop;
if (i2400m->state == I2400M_SS_IDLE)
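
As a sketch of the idiom adopted above, assuming a hypothetical my_prep_tx_header() helper:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int my_prep_tx_header(struct sk_buff *skb)
{
	/* Make the header private (copying it if the skb is cloned) before
	 * writing into it; this collapses the old skb_header_cloned() +
	 * pskb_expand_head() pair into a single call. */
	if (skb_cow_head(skb, 0))
		return -ENOMEM;	/* caller should drop the frame */

	/* header bytes may now be modified in place */
	return 0;
}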
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 200020eb3005..b2137e8f7ca6 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -53,7 +53,7 @@ config LIBERTAS_THINFIRM_USB
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on ISA_DMA_API && (PCI || BROKEN)
+ depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
select WEXT_SPY
@@ -73,7 +73,7 @@ config AIRO
config ATMEL
tristate "Atmel at76c50x chipset 802.11b support"
- depends on (PCI || PCMCIA)
+ depends on CFG80211 && (PCI || PCMCIA)
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
@@ -116,7 +116,7 @@ config AT76C50X_USB
config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
- depends on PCMCIA && (BROKEN || !M32R)
+ depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
@@ -138,7 +138,7 @@ config AIRO_CS
config PCMCIA_WL3501
tristate "Planet WL3501 PCMCIA cards"
- depends on PCMCIA
+ depends on CFG80211 && PCMCIA
select WIRELESS_EXT
select WEXT_SPY
help
@@ -168,7 +168,7 @@ config PRISM54
config USB_ZD1201
tristate "USB ZD1201 based Wireless device support"
- depends on USB
+ depends on CFG80211 && USB
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
@@ -281,5 +281,6 @@ source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
+source "drivers/net/wireless/rsi/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0fab227025be..0c8891686718 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -59,3 +59,4 @@ obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
obj-$(CONFIG_CW1200) += cw1200/
+obj-$(CONFIG_RSI_91X) += rsi/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index edf4b57c4aaa..64747d457bb3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -36,7 +36,7 @@
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/netdevice.h>
@@ -45,11 +45,11 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include "airo.h"
@@ -5797,7 +5797,7 @@ static int airo_set_freq(struct net_device *dev,
/* Hack to fall through... */
fwrq->e = 0;
- fwrq->m = ieee80211_freq_to_dsss_chan(f);
+ fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if((fwrq->m > 1000) || (fwrq->e > 0))
@@ -5841,7 +5841,8 @@ static int airo_get_freq(struct net_device *dev,
ch = le16_to_cpu(status_rid.channel);
if((ch > 0) && (ch < 15)) {
- fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000;
+ fwrq->m = 100000 *
+ ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
fwrq->e = 1;
} else {
fwrq->m = ch;
@@ -6898,7 +6899,8 @@ static int airo_get_range(struct net_device *dev,
k = 0;
for(i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
+ range->freq[k].m = 100000 *
+ ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
}
range->num_frequency = k;
@@ -7297,7 +7299,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add frequency */
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
- iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000;
+ iwe.u.freq.m = 100000 *
+ ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
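
The wireless-extensions frequency encoding behind the three airo conversions above, shown as a hypothetical helper (assumes linux/wireless.h and net/cfg80211.h):

#include <linux/wireless.h>
#include <net/cfg80211.h>

static void my_fill_iw_freq(struct iw_freq *fwrq, int chan)
{
	/* wext encodes a frequency as m * 10^e Hz; with e = 1 the mantissa
	 * is the MHz value from cfg80211 scaled by 100000. */
	fwrq->m = 100000 * ieee80211_channel_to_frequency(chan,
							  IEEE80211_BAND_2GHZ);
	fwrq->e = 1;
}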
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index b59cfbe0276b..a889fd66fc63 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -56,6 +56,15 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_op_flags {
+ ATH_OP_INVALID,
+ ATH_OP_BEACONS,
+ ATH_OP_ANI_RUN,
+ ATH_OP_PRIM_STA_VIF,
+ ATH_OP_HW_RESET,
+ ATH_OP_SCANNING,
+};
+
enum ath_bus_type {
ATH_PCI,
ATH_AHB,
@@ -63,7 +72,7 @@ enum ath_bus_type {
};
struct reg_dmn_pair_mapping {
- u16 regDmnEnum;
+ u16 reg_domain;
u16 reg_5ghz_ctl;
u16 reg_2ghz_ctl;
};
@@ -130,6 +139,7 @@ struct ath_common {
struct ieee80211_hw *hw;
int debug_mask;
enum ath_device_state state;
+ unsigned long op_flags;
struct ath_ani ani;
@@ -161,6 +171,9 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
+
+ int last_rssi;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
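
Since the new op_flags field is an unsigned long and the enum ath_op_flags values are bit numbers, a hypothetical user would be expected to drive it with the atomic bitops; a sketch assuming ath.h is included:

#include <linux/bitops.h>
#include "ath.h"

static void my_note_scan_start(struct ath_common *common)
{
	set_bit(ATH_OP_SCANNING, &common->op_flags);
}

static bool my_is_scanning(struct ath_common *common)
{
	return test_bit(ATH_OP_SCANNING, &common->op_flags);
}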
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index d44d618b05f9..a79499c82350 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* ath10k_ce_sendlist_send.
* The caller takes responsibility for any needed locking.
*/
-static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
- void *per_transfer_context,
- u32 buffer,
- unsigned int nbytes,
- unsigned int transfer_id,
- unsigned int flags)
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
*
* For the lack of a better place do the check here.
*/
- BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ret = ath10k_pci_wake(ar);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 67dbde6a5c74..8eb7f99ed992 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -23,7 +23,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 8
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3b59af3bddf4..ebc5fc2ede75 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -55,8 +55,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
- ar->is_target_paused = true;
- wake_up(&ar->event_queue);
+ complete(&ar->target_suspend);
}
static int ath10k_init_connect_htc(struct ath10k *ar)
@@ -470,8 +469,12 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
if (index == ie_len)
break;
- if (data[index] & (1 << bit))
+ if (data[index] & (1 << bit)) {
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "Enabling feature bit: %i\n",
+ i);
__set_bit(i, ar->fw_features);
+ }
}
ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
@@ -699,6 +702,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
@@ -722,8 +726,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- init_waitqueue_head(&ar->event_queue);
-
INIT_WORK(&ar->restart_work, ath10k_core_restart);
return ar;
@@ -856,10 +858,34 @@ err:
}
EXPORT_SYMBOL(ath10k_core_start);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ int ret;
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+ if (ret) {
+ ath10k_warn("could not suspend target (%d)\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+ if (ret == 0) {
+ ath10k_warn("suspend timed out - target pause event never came\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
void ath10k_core_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ /* try to suspend target */
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
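
A minimal sketch of the completion-based wait that replaces the waitqueue/flag pair above; my_wait_for_event and the elided request are placeholders.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_for_event(struct completion *done)
{
	unsigned long time_left;

	/* Re-arm before issuing the request so a stale complete() cannot
	 * satisfy this wait. */
	reinit_completion(done);

	/* ... send the request whose event handler will call complete(done) ... */

	time_left = wait_for_completion_timeout(done, 1 * HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}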
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index ade1781c7186..0e71979d837c 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -46,21 +46,35 @@
#define ATH10K_MAX_NUM_MGMT_PENDING 128
+/* number of failed packets */
+#define ATH10K_KICKOUT_THRESHOLD 50
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * won't start, we have the same functionality already in hostapd. Unit
+ * is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
- bool is_mapped;
- bool is_aborted;
u8 vdev_id;
struct {
u8 tid;
bool is_offchan;
-
- u8 frag_len;
- u8 pad_len;
+ struct ath10k_htt_txbuf *txbuf;
+ u32 txbuf_paddr;
} __packed htt;
+
+ struct {
+ bool dtim_zero;
+ bool deliver_cab;
+ } bcn;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -70,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
-static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
-{
- if (ATH10K_SKB_CB(skb)->is_mapped)
- return -EINVAL;
-
- ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
- return -EIO;
-
- ATH10K_SKB_CB(skb)->is_mapped = true;
- return 0;
-}
-
-static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
-{
- if (!ATH10K_SKB_CB(skb)->is_mapped)
- return -EINVAL;
-
- dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
- DMA_TO_DEVICE);
- ATH10K_SKB_CB(skb)->is_mapped = false;
- return 0;
-}
-
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -211,6 +199,18 @@ struct ath10k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
+struct ath10k_sta {
+ struct ath10k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+
+ struct work_struct update_wk;
+};
+
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
@@ -222,10 +222,17 @@ struct ath10k_vif {
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
+ /* protected by data_lock */
+ bool beacon_sent;
struct ath10k *ar;
struct ieee80211_vif *vif;
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_idx;
@@ -235,7 +242,6 @@ struct ath10k_vif {
union {
struct {
- u8 bssid[ETH_ALEN];
u32 uapsd;
} sta;
struct {
@@ -249,13 +255,11 @@ struct ath10k_vif {
u32 noa_len;
u8 *noa_data;
} ap;
- struct {
- u8 bssid[ETH_ALEN];
- } ibss;
} u;
u8 fixed_rate;
u8 fixed_nss;
+ u8 force_sgi;
};
struct ath10k_vif_iter {
@@ -355,8 +359,7 @@ struct ath10k {
const struct ath10k_hif_ops *ops;
} hif;
- wait_queue_head_t event_queue;
- bool is_target_paused;
+ struct completion target_suspend;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
@@ -412,6 +415,9 @@ struct ath10k {
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
+ /* current operating channel definition */
+ struct cfg80211_chan_def chandef;
+
int free_vdev_map;
int monitor_vdev_id;
bool monitor_enabled;
@@ -470,6 +476,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 1773c36c71a0..a5824990bd2a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -92,7 +92,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
#ifdef CONFIG_ATH10K_DEBUG
__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
- const char *fmt, ...);
+ const char *fmt, ...);
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index dcdea68bcc0a..2ac7beacddca 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -21,6 +21,14 @@
#include <linux/kernel.h>
#include "core.h"
+struct ath10k_hif_sg_item {
+ u16 transfer_id;
+ void *transfer_context; /* NULL = tx completion callback not called */
+ void *vaddr; /* for debugging mostly */
+ u32 paddr;
+ u16 len;
+};
+
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
};
struct ath10k_hif_ops {
- /* Send the head of a buffer to HIF for transmission to the target. */
- int (*send_head)(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int nbytes,
- struct sk_buff *buf);
+ /* send a scatter-gather list to the target */
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
/*
* API to handle HIF-specific BMI message exchanges, this API is
@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
};
-static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int nbytes,
- struct sk_buff *buf)
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
{
- return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index edc57ab505c8..7f1bccd3597f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
- ath10k_skb_unmap(htc->ar->dev, skb);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+ struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
- ret = ath10k_skb_map(htc->ar->dev, skb);
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
goto err_credits;
- ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
- skb->len, skb);
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+ sg_item.vaddr = skb->data;
+ sg_item.paddr = skb_cb->paddr;
+ sg_item.len = skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
- ath10k_skb_unmap(htc->ar->dev, skb);
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
- if (!skb) {
- ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+ if (WARN_ON_ONCE(!skb))
return 0;
- }
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
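
The open-coded mapping pattern the patch switches to (in place of the removed ath10k_skb_map/unmap helpers), as hypothetical helpers:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int my_map_skb(struct device *dev, struct sk_buff *skb,
		      dma_addr_t *paddr)
{
	*paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr))
		return -EIO;

	return 0;
}

static void my_unmap_skb(struct device *dev, struct sk_buff *skb,
			 dma_addr_t paddr)
{
	dma_unmap_single(dev, paddr, skb->len, DMA_TO_DEVICE);
}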
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index b93ae355bc08..654867fc1ae7 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,6 +20,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
+#include <linux/dmapool.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1181,11 +1182,20 @@ struct htt_rx_info {
u32 info1;
u32 info2;
} rate;
+
+ u32 tsf;
bool fcs_err;
bool amsdu_more;
bool mic_err;
};
+struct ath10k_htt_txbuf {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc cmd_tx;
+} __packed;
+
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@@ -1267,11 +1277,18 @@ struct ath10k_htt {
struct sk_buff **pending_tx;
unsigned long *used_msdu_ids; /* bitmap */
wait_queue_head_t empty_tx_wq;
+ struct dma_pool *tx_pool;
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
+
+ /* This is used to group tx/rx completions separately and process them
+ * in batches to reduce cache stalls */
+ struct tasklet_struct txrx_compl_task;
+ struct sk_buff_head tx_compl_q;
+ struct sk_buff_head rx_compl_q;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index fe8bd1b59f0e..cdcbe2de95f9 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -43,7 +43,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
-{
- return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
- htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
-}
-
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
+ tasklet_kill(&htt->txrx_compl_task);
+
+ skb_queue_purge(&htt->tx_compl_q);
+ skb_queue_purge(&htt->rx_compl_q);
while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
struct sk_buff *skb =
@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
int idx;
struct sk_buff *msdu;
- spin_lock_bh(&htt->rx_ring.lock);
+ lockdep_assert_held(&htt->rx_ring.lock);
- if (ath10k_htt_rx_ring_elems(htt) == 0)
- ath10k_warn("htt rx ring is empty!\n");
+ if (htt->rx_ring.fill_cnt == 0) {
+ ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
- spin_unlock_bh(&htt->rx_ring.lock);
return msdu;
}
@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
- if (ath10k_htt_rx_ring_elems(htt) == 0)
- ath10k_warn("htt rx ring is empty!\n");
+ lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
@@ -324,7 +322,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@@ -417,8 +416,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
- next->data,
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+ "htt rx chained: ", next->data,
next->len + skb_tailroom(next));
skb_trim(next, 0);
@@ -427,13 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
- msdu_chaining = 1;
- }
-
- if (msdu_len > 0) {
- /* This may suggest FW bug? */
- ath10k_warn("htt rx msdu len not consumed (%d)\n",
- msdu_len);
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@@ -535,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt);
+ skb_queue_head_init(&htt->tx_compl_q);
+ skb_queue_head_init(&htt->rx_compl_q);
+
+ tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+ (unsigned long)htt);
+
ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -638,6 +636,12 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+ /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+ return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
struct htt_rx_info *info)
{
@@ -687,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
case RX_MSDU_DECAP_NATIVE_WIFI:
/* pull decapped header and copy DA */
hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
skb_pull(skb, hdr_len);
@@ -751,7 +755,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
- ath10k_warn("received chained non A-MSDU frame\n");
+ ath10k_warn("htt rx received chained non A-MSDU frame\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
}
@@ -774,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
case RX_MSDU_DECAP_NATIVE_WIFI:
/* Pull decapped header */
hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
skb_pull(skb, hdr_len);
/* Push original header */
@@ -852,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
return false;
}
+static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+
+ if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
+ return true;
+
+ return false;
+}
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
@@ -883,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
return CHECKSUM_UNNECESSARY;
}
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+ struct sk_buff *next = msdu_head->next;
+ struct sk_buff *to_free = next;
+ int space;
+ int total_len = 0;
+
+ /* TODO: We might optimize this by using
+ * skb_try_coalesce or similar method to
+ * decrease copying, or maybe get mac80211 to
+ * provide a way to just receive a list of
+ * skb?
+ */
+
+ msdu_head->next = NULL;
+
+ /* Allocate total length all at once. */
+ while (next) {
+ total_len += next->len;
+ next = next->next;
+ }
+
+ space = total_len - skb_tailroom(msdu_head);
+ if ((space > 0) &&
+ (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+ /* TODO: bump some rx-oom error stat */
+ /* put it back together so we can free the
+ * whole list at once.
+ */
+ msdu_head->next = to_free;
+ return -1;
+ }
+
+ /* Walk list again, copying contents into
+ * msdu_head
+ */
+ next = to_free;
+ while (next) {
+ skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
+ next->len);
+ next = next->next;
+ }
+
+ /* If here, we have consolidated skb. Free the
+ * fragments and pass the main skb on up the
+ * stack.
+ */
+ ath10k_htt_rx_free_msdu_chain(to_free);
+ return 0;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@@ -894,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
+ lockdep_assert_held(&htt->rx_ring.lock);
+
memset(&info, 0, sizeof(info));
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@@ -937,6 +1008,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
}
if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to decrypt-err\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
@@ -944,13 +1017,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
status = info.status;
/* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+ ath10k_htt_rx_is_mgmt(msdu_head)) {
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
if (status != HTT_RX_IND_MPDU_STATUS_OK &&
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_enabled) {
ath10k_dbg(ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
@@ -960,14 +1036,14 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx CAC running\n");
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- /* FIXME: we do not support chaining yet.
- * this needs investigation */
- if (msdu_chaining) {
- ath10k_warn("msdu_chaining is true\n");
+ if (msdu_chaining &&
+ (ath10k_unchain_msdu(msdu_head) < 0)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
@@ -975,12 +1051,22 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
+
+ if (info.fcs_err)
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx has FCS err\n");
+
+ if (info.mic_err)
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx has MIC err\n");
+
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
info.signal += rx->ppdu.combined_rssi;
info.rate.info0 = rx->ppdu.info0;
info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+ info.tsf = __le32_to_cpu(rx->ppdu.tsf);
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
@@ -1014,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_head = NULL;
msdu_tail = NULL;
+
+ spin_lock_bh(&htt->rx_ring.lock);
msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
&msdu_head, &msdu_tail);
+ spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
@@ -1095,7 +1184,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
skb_trim(info.skb, info.skb->len - trim);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
@@ -1107,6 +1196,45 @@ end:
}
}
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+ __le16 msdu_id;
+ int i;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.no_ack = true;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+ tx_done.discard = true;
+ break;
+ default:
+ ath10k_warn("unhandled tx completion status %d\n", status);
+ tx_done.discard = true;
+ break;
+ }
+
+ ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+ msdu_id = resp->data_tx_completion.msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
+}
+
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1116,7 +1244,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn("unaligned htt message, expect trouble\n");
- ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@@ -1125,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
complete(&htt->target_version_received);
break;
}
- case HTT_T2H_MSG_TYPE_RX_IND: {
- ath10k_htt_rx_handler(htt, &resp->rx_ind);
- break;
- }
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ spin_lock_bh(&htt->rx_ring.lock);
+ __skb_queue_tail(&htt->rx_compl_q, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@@ -1163,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
+ spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
+ spin_unlock_bh(&htt->tx_lock);
break;
}
- case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
- struct htt_tx_done tx_done = {};
- int status = MS(resp->data_tx_completion.flags,
- HTT_DATA_TX_STATUS);
- __le16 msdu_id;
- int i;
-
- switch (status) {
- case HTT_DATA_TX_STATUS_NO_ACK:
- tx_done.no_ack = true;
- break;
- case HTT_DATA_TX_STATUS_OK:
- break;
- case HTT_DATA_TX_STATUS_DISCARD:
- case HTT_DATA_TX_STATUS_POSTPONE:
- case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
- tx_done.discard = true;
- break;
- default:
- ath10k_warn("unhandled tx completion status %d\n",
- status);
- tx_done.discard = true;
- break;
- }
-
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
- resp->data_tx_completion.num_msdus);
-
- for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
- msdu_id = resp->data_tx_completion.msdus[i];
- tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_unref(htt, &tx_done);
- }
- break;
- }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ spin_lock_bh(&htt->tx_lock);
+ __skb_queue_tail(&htt->tx_compl_q, skb);
+ spin_unlock_bh(&htt->tx_lock);
+ tasklet_schedule(&htt->txrx_compl_task);
+ return;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@@ -1240,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
/* Free the indication buffer */
dev_kfree_skb_any(skb);
}
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+ struct htt_resp *resp;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&htt->tx_lock);
+ while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+ ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->tx_lock);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+ resp = (struct htt_resp *)skb->data;
+ ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ dev_kfree_skb_any(skb);
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
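
A reduced sketch of the queue-and-batch completion scheme introduced above; struct my_ctx and its functions are illustrative, not driver code.

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t		lock;
	struct sk_buff_head	q;
	struct tasklet_struct	task;
};

static void my_compl_task(unsigned long ptr)
{
	struct my_ctx *ctx = (struct my_ctx *)ptr;
	struct sk_buff *skb;

	/* Drain everything queued so far in one batch. */
	spin_lock_bh(&ctx->lock);
	while ((skb = __skb_dequeue(&ctx->q))) {
		/* ... process one completion message ... */
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&ctx->lock);
}

static void my_ctx_init(struct my_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	skb_queue_head_init(&ctx->q);
	tasklet_init(&ctx->task, my_compl_task, (unsigned long)ctx);
}

static void my_queue_completion(struct my_ctx *ctx, struct sk_buff *skb)
{
	/* Fast path: just queue and defer the real work to the tasklet. */
	spin_lock_bh(&ctx->lock);
	__skb_queue_tail(&ctx->q, skb);
	spin_unlock_bh(&ctx->lock);

	tasklet_schedule(&ctx->task);
}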
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index f1d36d2d2723..7a3e2e40dd5c 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return -ENOMEM;
}
+ htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+ sizeof(struct ath10k_htt_txbuf), 4, 0);
+ if (!htt->tx_pool) {
+ kfree(htt->used_msdu_ids);
+ kfree(htt->pending_tx);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
struct htt_tx_done tx_done = {0};
int msdu_id;
- /* No locks needed. Called after communication with the device has
- * been stopped. */
-
+ spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
ath10k_txrx_tx_unref(htt, &tx_done);
}
+ spin_unlock_bh(&htt->tx_lock);
}
void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
ath10k_htt_tx_cleanup_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
+ dma_pool_destroy(htt->tx_pool);
return;
}
@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err_free_msdu_id;
}
- res = ath10k_skb_map(dev, msdu);
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- skb_cb->htt.frag_len = 0;
- skb_cb->htt.pad_len = 0;
+ skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
- ath10k_skb_unmap(dev, msdu);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
@@ -375,19 +384,19 @@ err:
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct device *dev = htt->ar->dev;
- struct htt_cmd *cmd;
- struct htt_data_tx_desc_frag *tx_frags;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
- struct sk_buff *txdesc = NULL;
- bool use_frags;
- u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
- u8 tid;
- int prefetch_len, desc_len;
- int msdu_id = -1;
+ struct ath10k_hif_sg_item sg_items[2];
+ struct htt_data_tx_desc_frag *frags;
+ u8 vdev_id = skb_cb->vdev_id;
+ u8 tid = skb_cb->htt.tid;
+ int prefetch_len;
int res;
- u8 flags0;
- u16 flags1;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ dma_addr_t paddr;
+ u32 frags_paddr;
+ bool use_frags;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-
- txdesc = ath10k_htc_alloc_skb(desc_len);
- if (!txdesc) {
- res = -ENOMEM;
- goto err_free_msdu_id;
- }
-
/* Since HTT 3.0 there is no separate mgmt tx command. However in case
* of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
* fragment list host driver specifies directly frame pointer. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
- if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
- ath10k_warn("htt alignment check failed. dropping packet.\n");
- res = -EIO;
- goto err_free_txdesc;
- }
+ skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+ &paddr);
+ if (!skb_cb->htt.txbuf)
+ goto err_free_msdu_id;
+ skb_cb->htt.txbuf_paddr = paddr;
- if (use_frags) {
- skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
- skb_cb->htt.pad_len = (unsigned long)msdu->data -
- round_down((unsigned long)msdu->data, 4);
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res)
+ goto err_free_txbuf;
- skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
- } else {
- skb_cb->htt.frag_len = 0;
- skb_cb->htt.pad_len = 0;
- }
+ if (likely(use_frags)) {
+ frags = skb_cb->htt.txbuf->frags;
- res = ath10k_skb_map(dev, msdu);
- if (res)
- goto err_pull_txfrag;
-
- if (use_frags) {
- dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
-
- /* tx fragment list must be terminated with zero-entry */
- tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
- tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
- skb_cb->htt.frag_len +
- skb_cb->htt.pad_len);
- tx_frags[0].len = __cpu_to_le32(msdu->len -
- skb_cb->htt.frag_len -
- skb_cb->htt.pad_len);
- tx_frags[1].paddr = __cpu_to_le32(0);
- tx_frags[1].len = __cpu_to_le32(0);
-
- dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+ frags[0].len = __cpu_to_le32(msdu->len);
+ frags[1].paddr = 0;
+ frags[1].len = 0;
- ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
- (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
- msdu->data, msdu->len);
+ flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- skb_put(txdesc, desc_len);
- cmd = (struct htt_cmd *)txdesc->data;
+ frags_paddr = skb_cb->htt.txbuf_paddr;
+ } else {
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- tid = ATH10K_SKB_CB(msdu)->htt.tid;
+ frags_paddr = skb_cb->paddr;
+ }
- ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why HTC tx completion handler itself is ignored by
+ * setting NULL to transfer_context for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance. */
+
+ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+ skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx) +
+ prefetch_len);
+ skb_cb->htt.txbuf->htc_hdr.flags = 0;
- flags0 = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- if (use_frags)
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
- else
- flags0 |= SM(ATH10K_HW_TXRX_MGMT,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
- cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
- cmd->data_tx.flags0 = flags0;
- cmd->data_tx.flags1 = __cpu_to_le16(flags1);
- cmd->data_tx.len = __cpu_to_le16(msdu->len -
- skb_cb->htt.frag_len -
- skb_cb->htt.pad_len);
- cmd->data_tx.id = __cpu_to_le16(msdu_id);
- cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
- cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
-
- memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
+ skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+ skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+ flags0, flags1, msdu->len, msdu_id, frags_paddr,
+ (u32)skb_cb->paddr, vdev_id, tid);
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
- res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+ sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+ sizeof(skb_cb->htt.txbuf->frags);
+ sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+ sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
- ath10k_skb_unmap(dev, msdu);
-err_pull_txfrag:
- skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-err_free_txdesc:
- dev_kfree_skb_any(txdesc);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
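
A sketch of the DMA-pool lifecycle used for the new ath10k_htt_txbuf descriptors above; the my_* helpers are hypothetical.

#include <linux/dmapool.h>
#include <linux/gfp.h>

static struct dma_pool *my_create_txbuf_pool(struct device *dev, size_t size)
{
	/* 4-byte alignment, no boundary-crossing restriction */
	return dma_pool_create("my txbuf pool", dev, size, 4, 0);
}

static void *my_get_txbuf(struct dma_pool *pool, dma_addr_t *paddr)
{
	/* GFP_ATOMIC because the tx path may not sleep */
	return dma_pool_alloc(pool, GFP_ATOMIC, paddr);
}

static void my_put_txbuf(struct dma_pool *pool, void *vaddr, dma_addr_t paddr)
{
	dma_pool_free(pool, vaddr, paddr);
}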
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f1505a25d810..35fc44e281f5 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -205,8 +205,11 @@ enum ath10k_mcast2ucast_mode {
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
+#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
+#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
@@ -216,6 +219,8 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_OFFSET 0x000000e0
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
@@ -273,6 +278,7 @@ enum ath10k_mcast2ucast_mode {
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
+#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 776e364eadcd..511a2f81e7af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to create wmi peer: %i\n", ret);
+ ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
+ ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
return ret;
}
spin_lock_bh(&ar->data_lock);
@@ -339,6 +341,51 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ param = ar->wmi.pdev_param->sta_kickout_th;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ ATH10K_KICKOUT_THRESHOLD);
+ if (ret) {
+ ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
@@ -444,8 +491,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_vdev_start(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
- struct ieee80211_conf *conf = &ar->hw->conf;
- struct ieee80211_channel *channel = conf->chandef.chan;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -457,16 +503,14 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
arg.dtim_period = arvif->dtim_period;
arg.bcn_intval = arvif->beacon_interval;
- arg.channel.freq = channel->center_freq;
-
- arg.channel.band_center_freq1 = conf->chandef.center_freq1;
-
- arg.channel.mode = chan_to_phymode(&conf->chandef);
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
arg.channel.min_power = 0;
- arg.channel.max_power = channel->max_power * 2;
- arg.channel.max_reg_power = channel->max_reg_power * 2;
- arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -475,7 +519,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
/* For now allow DFS for AP mode */
arg.channel.chan_radar =
- !!(channel->flags & IEEE80211_CHAN_RADAR);
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
arg.ssid = arvif->vif->bss_conf.ssid;
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -488,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i start failed: ret %d\n",
+ arg.vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("vdev setup failed %d\n", ret);
+ ath10k_warn("vdev %i setup failed %d\n",
+ arg.vdev_id, ret);
return ret;
}
@@ -512,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
- ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i stop failed: ret %d\n",
+ arvif->vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("vdev setup failed %d\n", ret);
+ ath10k_warn("vdev %i setup sync failed %d\n",
+ arvif->vdev_id, ret);
return ret;
}
@@ -527,7 +575,8 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
{
- struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct ieee80211_channel *channel = chandef->chan;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -540,11 +589,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
- arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
/* TODO setup this dynamically; what if we
don't have any vifs? */
- arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+ arg.channel.mode = chan_to_phymode(chandef);
arg.channel.chan_radar =
!!(channel->flags & IEEE80211_CHAN_RADAR);
@@ -555,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+ ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+ vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("Monitor vdev setup failed %d\n", ret);
+ ath10k_warn("Monitor vdev %i setup failed %d\n",
+ vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("Monitor vdev up failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i up failed: %d\n",
+ vdev_id, ret);
goto vdev_stop;
}
@@ -579,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ar->monitor_vdev_id, ret);
return ret;
}
@@ -602,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev down failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i down failed: %d\n",
+ ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+ ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("Monitor_down sync failed: %d\n", ret);
+ ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
ar->monitor_enabled = false;
return ret;
@@ -640,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+ ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@@ -669,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+ ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+ ar->monitor_vdev_id, ret);
return ret;
}
@@ -791,6 +849,22 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (!info->enable_beacon) {
ath10k_vdev_stop(arvif);
+
+ arvif->is_started = false;
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+ if (arvif->beacon) {
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_sent = false;
+ }
+ spin_unlock_bh(&arvif->ar->data_lock);
+
return;
}
@@ -800,12 +874,21 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (ret)
return;
- ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+ arvif->aid = 0;
+ memcpy(arvif->bssid, info->bssid, ETH_ALEN);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
if (ret) {
- ath10k_warn("Failed to bring up VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_vdev_stop(arvif);
return;
}
+
+ arvif->is_started = true;
+ arvif->is_up = true;
+
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
@@ -824,18 +907,18 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
self_peer, arvif->vdev_id, ret);
- if (is_zero_ether_addr(arvif->u.ibss.bssid))
+ if (is_zero_ether_addr(arvif->bssid))
return;
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
- arvif->u.ibss.bssid);
+ arvif->bssid);
if (ret) {
ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
- arvif->u.ibss.bssid, arvif->vdev_id, ret);
+ arvif->bssid, arvif->vdev_id, ret);
return;
}
- memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+ memset(arvif->bssid, 0, ETH_ALEN);
return;
}
@@ -878,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
return ret;
}
} else {
@@ -1017,7 +1100,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int smps;
int i, n;
lockdep_assert_held(&ar->conf_mutex);
@@ -1063,17 +1145,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_flags |= WMI_PEER_STBC;
}
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
- arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
- arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
- } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
- arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
- arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
- }
-
if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
arg->peer_rate_caps |= WMI_RC_TS_FLAG;
else if (ht_cap->mcs.rx_mask[1])
@@ -1083,8 +1154,23 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
arg->peer_ht_rates.rates[n++] = i;
- arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+ /*
+ * This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = sta->rx_nss;
+ }
ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->addr,
@@ -1092,27 +1178,20 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_num_spatial_streams);
}
-static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
- struct wmi_peer_assoc_complete_arg *arg)
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
u32 uapsd = 0;
u32 max_sp = 0;
+ int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (sta->wme)
- arg->peer_flags |= WMI_PEER_QOS;
-
if (sta->wme && sta->uapsd_queues) {
ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
- arg->peer_flags |= WMI_PEER_APSD;
- arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
-
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
@@ -1130,35 +1209,40 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
max_sp = sta->max_sp;
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_UAPSD,
- uapsd);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_MAX_SP,
- max_sp);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
/* TODO setup this based on STA listen interval and
beacon interval. Currently we don't know
sta->listen_interval - mac80211 patch required.
Currently use 10 seconds */
- ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
- sta->addr,
- WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
- 10);
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
+ if (ret) {
+ ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
}
-}
-static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
- struct ath10k_vif *arvif,
- struct ieee80211_sta *sta,
- struct ieee80211_bss_conf *bss_conf,
- struct wmi_peer_assoc_complete_arg *arg)
-{
- if (bss_conf->qos)
- arg->peer_flags |= WMI_PEER_QOS;
+ return 0;
}
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
@@ -1211,10 +1295,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
{
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
- ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+
+ if (sta->wme && sta->uapsd_queues) {
+ arg->peer_flags |= WMI_PEER_APSD;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+ }
break;
case WMI_VDEV_TYPE_STA:
- ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+ if (bss_conf->qos)
+ arg->peer_flags |= WMI_PEER_QOS;
break;
default:
break;
@@ -1293,6 +1384,33 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
return 0;
}
+static const u32 ath10k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath10k_smps_map))
+ return -EINVAL;
+
+ return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+ WMI_PEER_SMPS_STATE,
+ ath10k_smps_map[smps]);
+}
+
/* can be called only in mac80211 callbacks due to `key_count` usage */
static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1300,6 +1418,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_ht_cap ht_cap;
struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
@@ -1310,17 +1429,21 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("Failed to find station entry for %pM\n",
- bss_conf->bssid);
+ ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
}
+ /* ap_sta must be accessed only within the rcu section, which must be left
+ * before calling ath10k_setup_peer_smps() since it might sleep. */
+ ht_cap = ap_sta->ht_cap;
+
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
- bss_conf->bssid, ret);
+ ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+ bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
}
@@ -1329,8 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM\n: %d",
- bss_conf->bssid, ret);
+ ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
return;
}
@@ -1338,11 +1468,17 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
- ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
- bss_conf->bssid);
- if (ret)
+ arvif->aid = bss_conf->aid;
+ memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
ath10k_warn("VDEV: %d up failed: ret %d\n",
arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
}
/*
@@ -1382,6 +1518,9 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_idx = 0;
+
+ arvif->is_started = false;
+ arvif->is_up = false;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
@@ -1394,21 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc prepare failed for %pM\n",
- sta->addr);
+ ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for STA %pM\n: %d",
- sta->addr, ret);
+ ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
+ if (ret) {
+ ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
return ret;
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not install peer wep keys (%d)\n", ret);
+ ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
return ret;
}
@@ -1424,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+ ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
return ret;
}
@@ -1547,9 +1701,9 @@ static void ath10k_regd_update(struct ath10k *ar)
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
- regpair->regDmnEnum,
- regpair->regDmnEnum, /* 2ghz */
- regpair->regDmnEnum, /* 5ghz */
+ regpair->reg_domain,
+ regpair->reg_domain, /* 2ghz */
+ regpair->reg_domain, /* 5ghz */
regpair->reg_2ghz_ctl,
regpair->reg_5ghz_ctl);
if (ret)
@@ -2100,11 +2254,29 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
+ /*
+ * By default the FW sets the ARP frames' AC to voice (6). In that case the
+ * ARP exchange does not work properly for a UAPSD enabled AP. ARP requests
+ * which arrive with access category 0 are processed by the network stack
+ * and sent back with access category 0, but the FW changes the access
+ * category to 6. Setting the ARP frames' access category to best effort (0)
+ * solves this problem.
+ */
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->arp_ac_override, 0);
+ if (ret) {
+ ath10k_warn("could not set arp ac override parameter: %d\n",
+ ret);
+ goto exit;
+ }
+
ath10k_regd_update(ar);
+ ret = 0;
exit:
mutex_unlock(&ar->conf_mutex);
- return 0;
+ return ret;
}
static void ath10k_stop(struct ieee80211_hw *hw)
@@ -2145,6 +2317,98 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
+static const char *chandef_get_width(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return "20 (noht)";
+ case NL80211_CHAN_WIDTH_20:
+ return "20";
+ case NL80211_CHAN_WIDTH_40:
+ return "40";
+ case NL80211_CHAN_WIDTH_80:
+ return "80";
+ case NL80211_CHAN_WIDTH_80P80:
+ return "80+80";
+ case NL80211_CHAN_WIDTH_160:
+ return "160";
+ case NL80211_CHAN_WIDTH_5:
+ return "5";
+ case NL80211_CHAN_WIDTH_10:
+ return "10";
+ }
+ return "?";
+}
+
+static void ath10k_config_chan(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ bool monitor_was_enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
+ ar->chandef.chan->center_freq,
+ ar->chandef.center_freq1,
+ ar->chandef.center_freq2,
+ chandef_get_width(ar->chandef.width));
+
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface. */
+ monitor_was_enabled = ar->monitor_enabled;
+
+ if (ar->monitor_enabled)
+ ath10k_monitor_stop(ar);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret) {
+ ath10k_warn("could not stop vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* all vdevs are now stopped - now attempt to restart them */
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+ ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn("could not start vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ if (!arvif->is_up)
+ continue;
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn("could not bring vdev up %d (%d)\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ if (monitor_was_enabled)
+ ath10k_monitor_start(ar, ar->monitor_vdev_id);
+}
+
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
@@ -2165,6 +2429,11 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&ar->data_lock);
ath10k_config_radar_detection(ar);
+
+ if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
+ ar->chandef = conf->chandef;
+ ath10k_config_chan(ar);
+ }
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -2214,7 +2483,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
enum wmi_sta_powersave_param param;
int ret = 0;
- u32 value, param_id;
+ u32 value;
int bit;
u32 vdev_param;
@@ -2276,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+ ath10k_warn("WMI vdev %i create failed: ret %d\n",
+ arvif->vdev_id, ret);
goto err;
}
@@ -2287,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("Failed to set default keyid: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2296,23 +2567,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("Failed to set TX encap: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("Failed to create peer for AP: %d\n", ret);
+ ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+ arvif->vdev_id, ret);
goto err_vdev_delete;
}
- param_id = ar->wmi.pdev_param->sta_kickout_th;
-
- /* Disable STA KICKOUT functionality in FW */
- ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
- if (ret)
- ath10k_warn("Failed to disable STA KICKOUT\n");
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+ ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
}
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2321,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2330,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2339,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+ ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+ arvif->vdev_id, ret);
goto err_peer_delete;
}
}
@@ -2403,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+ ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+ arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("WMI vdev delete failed: %d\n", ret);
+ ath10k_warn("WMI vdev %i delete failed: %d\n",
+ arvif->vdev_id, ret);
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = false;
@@ -2502,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON) {
@@ -2515,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
@@ -2530,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("Failed to set dtim period for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_SSID &&
@@ -2551,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
+ ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2559,15 +2837,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
* this is never erased as we use it for crypto key
* clearing; this is a FW requirement
*/
- memcpy(arvif->u.sta.bssid, info->bssid,
- ETH_ALEN);
+ memcpy(arvif->bssid, info->bssid, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
- /* FIXME: check return value */
ret = ath10k_vdev_start(arvif);
+ if (ret) {
+ ath10k_warn("failed to start vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ arvif->is_started = true;
}
/*
@@ -2576,7 +2859,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
* IBSS in order to remove BSSID peer.
*/
if (vif->type == NL80211_IFTYPE_ADHOC)
- memcpy(arvif->u.ibss.bssid, info->bssid,
+ memcpy(arvif->bssid, info->bssid,
ETH_ALEN);
}
}
@@ -2598,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
- ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2617,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("Failed to set erp slot for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2636,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("Failed to set preamble for VDEV: %d\n",
- arvif->vdev_id);
+ ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ASSOC) {
@@ -2645,6 +2928,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_bss_assoc(hw, vif, info);
}
+exit:
mutex_unlock(&ar->conf_mutex);
}
@@ -2767,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
key->keyidx);
if (ret)
- ath10k_warn("failed to set group key as default key: %d\n",
- ret);
+ ath10k_warn("failed to set vdev %i group key as default key: %d\n",
+ arvif->vdev_id, ret);
}
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2828,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+ ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
goto exit;
}
@@ -2850,6 +3135,69 @@ exit:
return ret;
}
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath10k *ar;
+ struct ath10k_vif *arvif;
+ struct ath10k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u32 changed, bw, nss, smps;
+ int err;
+
+ arsta = container_of(wk, struct ath10k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+ sta->addr, bw);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_CHAN_WIDTH, bw);
+ if (err)
+ ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath10k_warn("failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_SMPS_STATE, smps);
+ if (err)
+ ath10k_warn("failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2858,9 +3206,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
int max_num_peers;
int ret = 0;
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ }
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -2899,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
@@ -2916,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_assoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to associate station: %pM\n",
- sta->addr);
+ ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -2930,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to disassociate station: %pM\n",
- sta->addr);
+ ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+ sta->addr, arvif->vdev_id, ret);
}
exit:
mutex_unlock(&ar->conf_mutex);
@@ -3212,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("tx not flushed\n");
+ ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+ skip, ar->state, ret);
skip:
mutex_unlock(&ar->conf_mutex);
@@ -3234,23 +3596,14 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
int ret;
- ar->is_target_paused = false;
+ mutex_lock(&ar->conf_mutex);
- ret = ath10k_wmi_pdev_suspend_target(ar);
+ ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
if (ret) {
- ath10k_warn("could not suspend target (%d)\n", ret);
- return 1;
- }
-
- ret = wait_event_interruptible_timeout(ar->event_queue,
- ar->is_target_paused == true,
- 1 * HZ);
- if (ret < 0) {
- ath10k_warn("suspend interrupted (%d)\n", ret);
- goto resume;
- } else if (ret == 0) {
- ath10k_warn("suspend timed out - target pause event never came\n");
- goto resume;
+ if (ret == -ETIMEDOUT)
+ goto resume;
+ ret = 1;
+ goto exit;
}
ret = ath10k_hif_suspend(ar);
@@ -3259,12 +3612,17 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
goto resume;
}
- return 0;
+ ret = 0;
+ goto exit;
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
ath10k_warn("could not resume target (%d)\n", ret);
- return 1;
+
+ ret = 1;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
static int ath10k_resume(struct ieee80211_hw *hw)
@@ -3272,19 +3630,26 @@ static int ath10k_resume(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
int ret;
+ mutex_lock(&ar->conf_mutex);
+
ret = ath10k_hif_resume(ar);
if (ret) {
ath10k_warn("could not resume hif (%d)\n", ret);
- return 1;
+ ret = 1;
+ goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
ath10k_warn("could not resume target (%d)\n", ret);
- return 1;
+ ret = 1;
+ goto exit;
}
- return 0;
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
}
#endif
@@ -3575,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
u8 fixed_rate,
- u8 fixed_nss)
+ u8 fixed_nss,
+ u8 force_sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
@@ -3584,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
mutex_lock(&ar->conf_mutex);
if (arvif->fixed_rate == fixed_rate &&
- arvif->fixed_nss == fixed_nss)
+ arvif->fixed_nss == fixed_nss &&
+ arvif->force_sgi == force_sgi)
goto exit;
if (fixed_rate == WMI_FIXED_RATE_NONE)
ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ if (force_sgi)
+ ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+
vdev_param = ar->wmi.vdev_param->fixed_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
@@ -3615,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
arvif->fixed_nss = fixed_nss;
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ force_sgi);
+
+ if (ret) {
+ ath10k_warn("Could not set sgi param %d: %d\n",
+ force_sgi, ret);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arvif->force_sgi = force_sgi;
+
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -3629,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
u8 fixed_rate = WMI_FIXED_RATE_NONE;
u8 fixed_nss = ar->num_rf_chains;
+ u8 force_sgi;
+
+ force_sgi = mask->control[band].gi;
+ if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
if (!ath10k_default_bitrate_mask(ar, band, mask)) {
if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3637,7 +4025,113 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
return -EINVAL;
}
- return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+ if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+ ath10k_warn("Could not force SGI usage for default rate settings\n");
+ return -EINVAL;
+ }
+
+ return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+ fixed_nss, force_sgi);
+}
+
+static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ /* there's no need to do anything here. vif->csa_active is enough */
+ return;
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
+ sta->addr, sta->bandwidth);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+ ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
+ sta->addr, sta->smps_mode);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ /* FIXME: Not implemented. Probably the only way to do it would
+ * be to re-assoc the peer. */
+ changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac sta rc update for %pM: changing supported rates not implemented\n",
+ sta->addr);
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ /*
+ * FIXME: Return 0 for the time being. Need to figure out whether the FW
+ * has the API to fetch 64-bit local TSF
+ */
+
+ return 0;
}
static const struct ieee80211_ops ath10k_ops = {
@@ -3663,6 +4157,9 @@ static const struct ieee80211_ops ath10k_ops = {
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
+ .channel_switch_beacon = ath10k_channel_switch_beacon,
+ .sta_rc_update = ath10k_sta_rc_update,
+ .get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@@ -3939,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
ath10k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
- ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+ ath10k_warn("No VIF found for vdev %d\n", vdev_id);
return NULL;
}
@@ -4020,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_AP_LINK_PS;
+ IEEE80211_HW_AP_LINK_PS |
+ IEEE80211_HW_SPECTRUM_MGMT;
/* MSDU can have HTT TX fragment pushed in front. The additional 4
* bytes is used for padding/alignment if necessary. */
@@ -4038,10 +4536,12 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -4076,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("Regulatory initialization failed\n");
+ ath10k_err("Regulatory initialization failed: %i\n", ret);
goto err_free;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 29fd197d1fd8..9d242d801d9d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,13 +58,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
u32 *data);
-static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
-static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -73,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
struct ath10k_ce_pipe *rx_pipe,
struct bmi_xfer *xfer);
-static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
@@ -678,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
}
}
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
-{
- struct ath10k_pci_compl *compl = NULL;
-
- spin_lock_bh(&pipe_info->pipe_lock);
- if (list_empty(&pipe_info->compl_free)) {
- ath10k_warn("Completion buffers are full\n");
- goto exit;
- }
- compl = list_first_entry(&pipe_info->compl_free,
- struct ath10k_pci_compl, list);
- list_del(&compl->list);
-exit:
- spin_unlock_bh(&pipe_info->pipe_lock);
- return compl;
-}
-
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
- struct ath10k_pci_compl *compl;
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
void *transfer_context;
u32 ce_data;
unsigned int nbytes;
@@ -714,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
&ce_data, &nbytes,
&transfer_id) == 0) {
- compl = get_free_compl(pipe_info);
- if (!compl)
- break;
-
- compl->state = ATH10K_PCI_COMPL_SEND;
- compl->ce_state = ce_state;
- compl->pipe_info = pipe_info;
- compl->skb = transfer_context;
- compl->nbytes = nbytes;
- compl->transfer_id = transfer_id;
- compl->flags = 0;
+ /* no need to call tx completion for NULL pointers */
+ if (transfer_context == NULL)
+ continue;
- /*
- * Add the completion to the processing queue.
- */
- spin_lock_bh(&ar_pci->compl_lock);
- list_add_tail(&compl->list, &ar_pci->compl_process);
- spin_unlock_bh(&ar_pci->compl_lock);
+ cb->tx_completion(ar, transfer_context, transfer_id);
}
-
- ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
@@ -743,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
- struct ath10k_pci_compl *compl;
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
struct sk_buff *skb;
void *transfer_context;
u32 ce_data;
- unsigned int nbytes;
+ unsigned int nbytes, max_nbytes;
unsigned int transfer_id;
unsigned int flags;
+ int err;
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
&ce_data, &nbytes, &transfer_id,
&flags) == 0) {
- compl = get_free_compl(pipe_info);
- if (!compl)
- break;
-
- compl->state = ATH10K_PCI_COMPL_RECV;
- compl->ce_state = ce_state;
- compl->pipe_info = pipe_info;
- compl->skb = transfer_context;
- compl->nbytes = nbytes;
- compl->transfer_id = transfer_id;
- compl->flags = flags;
+ err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+ if (unlikely(err)) {
+ /* FIXME: retry */
+ ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+ pipe_info->pipe_num, err);
+ }
skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- /*
- * Add the completion to the processing queue.
- */
- spin_lock_bh(&ar_pci->compl_lock);
- list_add_tail(&compl->list, &ar_pci->compl_process);
- spin_unlock_bh(&ar_pci->compl_lock);
- }
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
- ath10k_pci_process_ce(ar);
+ skb_put(skb, nbytes);
+ cb->rx_completion(ar, skb, pipe_info->pipe_num);
+ }
}
-/* Send the first nbytes bytes of the buffer */
-static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
- unsigned int transfer_id,
- unsigned int bytes, struct sk_buff *nbuf)
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
{
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
- struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
- unsigned int len;
- u32 flags = 0;
- int ret;
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ int err, i;
- len = min(bytes, nbuf->len);
- bytes -= len;
+ spin_lock_bh(&ar_pci->ce_lock);
- if (len & 3)
- ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
- ath10k_dbg(ATH10K_DBG_PCI,
- "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
- nbuf->data, (unsigned long long) skb_cb->paddr,
- nbuf->len, len);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
- "ath10k tx: data: ",
- nbuf->data, nbuf->len);
-
- ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
- flags);
- if (ret)
- ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ items[i].vaddr, items[i].len);
- return ret;
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto unlock;
+ }
+
+ /* `i` is equal to `n_items -1` after for() */
+
+ ath10k_dbg(ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto unlock;
+
+ err = 0;
+unlock:
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return err;
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@@ -833,9 +817,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
ath10k_err("firmware crashed!\n");
ath10k_err("hardware name %s version 0x%x\n",
ar->hw_params.name, ar->target_version);
- ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
- ar->fw_version_minor, ar->fw_version_release,
- ar->fw_version_build);
+ ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
ret = ath10k_pci_diag_read_mem(ar, host_addr,
@@ -904,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
sizeof(ar_pci->msg_callbacks_current));
}
-static int ath10k_pci_alloc_compl(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- const struct ce_attr *attr;
- struct ath10k_pci_pipe *pipe_info;
- struct ath10k_pci_compl *compl;
- int i, pipe_num, completions;
-
- spin_lock_init(&ar_pci->compl_lock);
- INIT_LIST_HEAD(&ar_pci->compl_process);
-
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
-
- spin_lock_init(&pipe_info->pipe_lock);
- INIT_LIST_HEAD(&pipe_info->compl_free);
-
- /* Handle Diagnostic CE specially */
- if (pipe_info->ce_hdl == ar_pci->ce_diag)
- continue;
-
- attr = &host_ce_config_wlan[pipe_num];
- completions = 0;
-
- if (attr->src_nentries)
- completions += attr->src_nentries;
-
- if (attr->dest_nentries)
- completions += attr->dest_nentries;
-
- for (i = 0; i < completions; i++) {
- compl = kmalloc(sizeof(*compl), GFP_KERNEL);
- if (!compl) {
- ath10k_warn("No memory for completion state\n");
- ath10k_pci_cleanup_ce(ar);
- return -ENOMEM;
- }
-
- compl->state = ATH10K_PCI_COMPL_FREE;
- list_add_tail(&compl->list, &pipe_info->compl_free);
- }
- }
-
- return 0;
-}
-
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -994,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
tasklet_kill(&ar_pci->pipe_info[i].intr);
}
-static void ath10k_pci_stop_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
-
- /* Mark pending completions as aborted, so that upper layers free up
- * their associated resources */
- spin_lock_bh(&ar_pci->compl_lock);
- list_for_each_entry(compl, &ar_pci->compl_process, list) {
- skb = compl->skb;
- ATH10K_SKB_CB(skb)->is_aborted = true;
- }
- spin_unlock_bh(&ar_pci->compl_lock);
-}
-
-static void ath10k_pci_cleanup_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_compl *compl, *tmp;
- struct ath10k_pci_pipe *pipe_info;
- struct sk_buff *netbuf;
- int pipe_num;
-
- /* Free pending completions. */
- spin_lock_bh(&ar_pci->compl_lock);
- if (!list_empty(&ar_pci->compl_process))
- ath10k_warn("pending completions still present! possible memory leaks.\n");
-
- list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
- list_del(&compl->list);
- netbuf = compl->skb;
- dev_kfree_skb_any(netbuf);
- kfree(compl);
- }
- spin_unlock_bh(&ar_pci->compl_lock);
-
- /* Free unused completions for each pipe. */
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
-
- spin_lock_bh(&pipe_info->pipe_lock);
- list_for_each_entry_safe(compl, tmp,
- &pipe_info->compl_free, list) {
- list_del(&compl->list);
- kfree(compl);
- }
- spin_unlock_bh(&pipe_info->pipe_lock);
- }
-}
-
-static void ath10k_pci_process_ce(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ar->hif.priv;
- struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
- struct ath10k_pci_compl *compl;
- struct sk_buff *skb;
- unsigned int nbytes;
- int ret, send_done = 0;
-
- /* Upper layers aren't ready to handle tx/rx completions in parallel so
- * we must serialize all completion processing. */
-
- spin_lock_bh(&ar_pci->compl_lock);
- if (ar_pci->compl_processing) {
- spin_unlock_bh(&ar_pci->compl_lock);
- return;
- }
- ar_pci->compl_processing = true;
- spin_unlock_bh(&ar_pci->compl_lock);
-
- for (;;) {
- spin_lock_bh(&ar_pci->compl_lock);
- if (list_empty(&ar_pci->compl_process)) {
- spin_unlock_bh(&ar_pci->compl_lock);
- break;
- }
- compl = list_first_entry(&ar_pci->compl_process,
- struct ath10k_pci_compl, list);
- list_del(&compl->list);
- spin_unlock_bh(&ar_pci->compl_lock);
-
- switch (compl->state) {
- case ATH10K_PCI_COMPL_SEND:
- cb->tx_completion(ar,
- compl->skb,
- compl->transfer_id);
- send_done = 1;
- break;
- case ATH10K_PCI_COMPL_RECV:
- ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
- if (ret) {
- ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
- compl->pipe_info->pipe_num, ret);
- break;
- }
-
- skb = compl->skb;
- nbytes = compl->nbytes;
-
- ath10k_dbg(ATH10K_DBG_PCI,
- "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
- skb, nbytes);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
- "ath10k rx: ", skb->data, nbytes);
-
- if (skb->len + skb_tailroom(skb) >= nbytes) {
- skb_trim(skb, 0);
- skb_put(skb, nbytes);
- cb->rx_completion(ar, skb,
- compl->pipe_info->pipe_num);
- } else {
- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
- nbytes,
- skb->len + skb_tailroom(skb));
- }
- break;
- case ATH10K_PCI_COMPL_FREE:
- ath10k_warn("free completion cannot be processed\n");
- break;
- default:
- ath10k_warn("invalid completion state (%d)\n",
- compl->state);
- break;
- }
-
- compl->state = ATH10K_PCI_COMPL_FREE;
-
- /*
- * Add completion back to the pipe's free list.
- */
- spin_lock_bh(&compl->pipe_info->pipe_lock);
- list_add_tail(&compl->list, &compl->pipe_info->compl_free);
- spin_unlock_bh(&compl->pipe_info->pipe_lock);
- }
-
- spin_lock_bh(&ar_pci->compl_lock);
- ar_pci->compl_processing = false;
- spin_unlock_bh(&ar_pci->compl_lock);
-}
-
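The deleted ath10k_pci_process_ce() above drains a shared completion list one entry at a time, re-taking the spinlock around every list operation so that tx/rx completions are never handled in parallel. A minimal userspace sketch of that drain pattern follows; the pthread mutex, the item type and process_item() are stand-ins for illustration, not driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int value;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static void process_item(struct item *it)
{
	printf("processed %d\n", it->value);
}

/* Pop one entry at a time under the lock and process it with the lock
 * dropped, mirroring the for (;;) loop in the removed function. */
static void drain(void)
{
	struct item *it;

	for (;;) {
		pthread_mutex_lock(&lock);
		it = head;
		if (!it) {
			pthread_mutex_unlock(&lock);
			break;
		}
		head = it->next;
		pthread_mutex_unlock(&lock);

		process_item(it);
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));
		it->value = i;
		it->next = head;
		head = it;
	}
	drain();
	return 0;
}
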
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id, u8 *ul_pipe,
@@ -1306,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ret = ath10k_pci_alloc_compl(ar);
- if (ret) {
- ath10k_warn("failed to allocate CE completions: %d\n", ret);
- goto err_early_irq;
- }
-
ret = ath10k_pci_request_irq(ar);
if (ret) {
ath10k_warn("failed to post RX buffers for all pipes: %d\n",
ret);
- goto err_free_compl;
+ goto err_early_irq;
}
ret = ath10k_pci_setup_ce_irq(ar);
@@ -1340,10 +1129,6 @@ err_stop:
ath10k_ce_disable_interrupts(ar);
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ath10k_pci_stop_ce(ar);
- ath10k_pci_process_ce(ar);
-err_free_compl:
- ath10k_pci_cleanup_ce(ar);
err_early_irq:
/* Though there should be no interrupts (device was reset)
* power_down() expects the early IRQ to be installed as per the
@@ -1414,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
&ce_data, &nbytes, &id) == 0) {
- /*
- * Indicate the completion to higer layer to free
- * the buffer
- */
-
- if (!netbuf) {
- ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
- ce_hdl->id);
+ /* no need to call tx completion for NULL pointers */
+ if (!netbuf)
continue;
- }
- ATH10K_SKB_CB(netbuf)->is_aborted = true;
ar_pci->msg_callbacks_current.tx_completion(ar,
netbuf,
id);
@@ -1483,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_free_irq(ar);
ath10k_pci_kill_tasklet(ar);
- ath10k_pci_stop_ce(ar);
ret = ath10k_pci_request_early_irq(ar);
if (ret)
@@ -1493,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* not DMA nor interrupt. We process the leftovers and then free
* everything else up. */
- ath10k_pci_process_ce(ar);
- ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
/* Make sure the device won't access any structures on the host by
@@ -1502,7 +1276,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
* configuration during init. If ringbuffers are freed and the device
* were to access them this could lead to memory corruption on the
* host. */
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
ar_pci->started = 0;
}
@@ -1993,7 +1767,94 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+
+ ret = ath10k_do_pci_wake(ar);
+ if (ret) {
+ ath10k_err("failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
+ /* debug */
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CPU_INTR_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ val);
+
+ /* disable pending irqs */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS, 0);
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CLR_ADDRESS, ~0);
+
+ msleep(100);
+
+ /* clear fw indicator */
+ ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+
+ /* clear target LF timer interrupts */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+
+ /* reset CE */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ msleep(10);
+
+ /* unreset CE */
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ msleep(10);
+
+ /* debug */
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CPU_INTR_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ val);
+
+ /* CPU warm reset */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+
+ msleep(100);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+ ath10k_do_pci_sleep(ar);
+ return ret;
+}
+
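ath10k_pci_warm_reset() above is essentially a scripted read-modify-write sequence on SoC reset-control registers: assert the CE reset bit, wait, deassert it, then leave the CPU warm-reset bit asserted. A compact sketch of that bit-pulse pattern on a fake register follows; reg_read()/reg_write() and the mask values are illustrative stand-ins only.

#include <stdint.h>
#include <stdio.h>

#define CE_RST_MASK       0x00000001u	/* stand-in for SOC_RESET_CONTROL_CE_RST_MASK */
#define CPU_WARM_RST_MASK 0x00000002u	/* stand-in for SOC_RESET_CONTROL_CPU_WARM_RST_MASK */

static uint32_t soc_reset_control;	/* fake register backing store */

static uint32_t reg_read(void)    { return soc_reset_control; }
static void reg_write(uint32_t v) { soc_reset_control = v; }

/* Pulse a reset bit: set it, (the driver sleeps here), then clear it,
 * reading the register back after each write as the hunk above does. */
static void pulse(uint32_t mask)
{
	uint32_t val = reg_read();

	reg_write(val | mask);
	val = reg_read();
	/* msleep(10) in the driver */
	reg_write(val & ~mask);
	(void)reg_read();
}

int main(void)
{
	pulse(CE_RST_MASK);
	reg_write(reg_read() | CPU_WARM_RST_MASK);	/* CPU warm reset stays asserted */
	printf("reset control = 0x%08x\n", (unsigned)reg_read());
	return 0;
}
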
+static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
const char *irq_mode;
@@ -2009,7 +1870,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
* is in an unexpected state. We try to catch that here in order to
* reset the Target and retry the probe.
*/
- ret = ath10k_pci_device_reset(ar);
+ if (cold_reset)
+ ret = ath10k_pci_cold_reset(ar);
+ else
+ ret = ath10k_pci_warm_reset(ar);
+
if (ret) {
ath10k_err("failed to reset target: %d\n", ret);
goto err;
@@ -2079,7 +1944,7 @@ err_deinit_irq:
ath10k_pci_deinit_irq(ar);
err_ce:
ath10k_pci_ce_deinit(ar);
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
err_ps:
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
@@ -2087,6 +1952,34 @@ err:
return ret;
}
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ /*
+ * Hardware CUS232 version 2 has some issues with cold reset and the
+ * preferred (and safer) way to perform a device reset is through a
+ * warm reset.
+ *
+ * Warm reset doesn't always work though (notably after a firmware
+ * crash) so fall back to cold reset if necessary.
+ */
+ ret = __ath10k_pci_hif_power_up(ar, false);
+ if (ret) {
+ ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+ ret);
+
+ ret = __ath10k_pci_hif_power_up(ar, true);
+ if (ret) {
+ ath10k_err("failed to power up target using cold reset too (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
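The new ath10k_pci_hif_power_up() wrapper encodes a simple policy: prefer the gentler warm reset and only fall back to a cold reset when the warm path fails (notably after a firmware crash). A minimal sketch of that try-then-fall-back pattern, with a hypothetical power_up(bool cold) standing in for __ath10k_pci_hif_power_up():

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for __ath10k_pci_hif_power_up(); pretend the warm path fails. */
static int power_up(bool cold_reset)
{
	return cold_reset ? 0 : -1;
}

static int hif_power_up(void)
{
	int ret = power_up(false);		/* warm reset first */

	if (ret) {
		fprintf(stderr, "warm reset failed (%d), trying cold reset\n", ret);
		ret = power_up(true);		/* cold reset as a last resort */
		if (ret) {
			fprintf(stderr, "cold reset failed too (%d)\n", ret);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return hif_power_up() ? 1 : 0;
}
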
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2094,7 +1987,7 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
- ath10k_pci_device_reset(ar);
+ ath10k_pci_warm_reset(ar);
ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2151,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
- .send_head = ath10k_pci_hif_send_head,
+ .tx_sg = ath10k_pci_hif_tx_sg,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,
@@ -2411,11 +2304,10 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
/* Try MSI-X */
if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
- ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
- if (ret == 0)
- return 0;
+ ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+ ar_pci->num_msi_intrs);
if (ret > 0)
- pci_disable_msi(ar_pci->pdev);
+ return 0;
/* fall-through */
}
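pci_enable_msi_range(pdev, min, max) returns the number of vectors actually allocated on success (a positive count) or a negative errno, which is why the hunk above now treats ret > 0 as success; with min == max it is an all-or-nothing request. The older pci_enable_msi_block() returned 0 on success, hence the removed ret == 0 check. A userspace sketch of the new return-value contract, with a stub standing in for the PCI call:

#include <stdio.h>
#include <errno.h>

/* Stub with the same contract as pci_enable_msi_range(): the number of
 * vectors allocated on success, or a negative errno on failure. */
static int fake_enable_msi_range(int minvec, int maxvec)
{
	int available = 4;	/* pretend the device offers 4 vectors */

	if (available < minvec)
		return -ENOSPC;
	return (available < maxvec) ? available : maxvec;
}

int main(void)
{
	int want = 8;
	int ret = fake_enable_msi_range(want, want);	/* all-or-nothing, like the driver */

	if (ret > 0)
		printf("got %d MSI vectors\n", ret);
	else
		printf("MSI range request failed (%d), falling back to 1 vector\n", ret);
	return 0;
}
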
@@ -2482,6 +2374,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
case MSI_NUM_REQUEST:
pci_disable_msi(ar_pci->pdev);
return 0;
+ default:
+ pci_disable_msi(ar_pci->pdev);
}
ath10k_warn("unknown irq configuration upon deinit\n");
@@ -2523,7 +2417,7 @@ out:
return ret;
}
-static int ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_cold_reset(struct ath10k *ar)
{
int i, ret;
u32 val;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index a4f32038c440..b43fdb4f7319 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,23 +43,6 @@ struct bmi_xfer {
u32 resp_len;
};
-enum ath10k_pci_compl_state {
- ATH10K_PCI_COMPL_FREE = 0,
- ATH10K_PCI_COMPL_SEND,
- ATH10K_PCI_COMPL_RECV,
-};
-
-struct ath10k_pci_compl {
- struct list_head list;
- enum ath10k_pci_compl_state state;
- struct ath10k_ce_pipe *ce_state;
- struct ath10k_pci_pipe *pipe_info;
- struct sk_buff *skb;
- unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
-};
-
/*
* PCI-specific Target state
*
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
- /* List of free CE completion slots */
- struct list_head compl_free;
-
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
@@ -205,14 +185,6 @@ struct ath10k_pci {
atomic_t keep_awake_count;
bool verified_awake;
- /* List of CE completions to be processed */
- struct list_head compl_process;
-
- /* protects compl_processing and compl_process */
- spinlock_t compl_lock;
-
- bool compl_processing;
-
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 74f45fa6f428..0541dd939ce9 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
- int ret;
+
+ lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
- ret = ath10k_skb_unmap(dev, msdu);
- if (ret)
- ath10k_warn("data skb unmap failed (%d)\n", ret);
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->htt.frag_len)
- skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+ if (skb_cb->htt.txbuf)
+ dma_pool_free(htt->tx_pool,
+ skb_cb->htt.txbuf,
+ skb_cb->htt.txbuf_paddr);
ath10k_report_offchan_tx(htt->ar, msdu);
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */
exit:
- spin_lock_bh(&htt->tx_lock);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
- spin_unlock_bh(&htt->tx_lock);
}
static const u8 rx_legacy_rate_idx[] = {
@@ -204,7 +203,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
break;
/* 80MHZ */
case 2:
- status->flag |= RX_FLAG_80MHZ;
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
@@ -258,20 +257,26 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->band = ch->band;
status->freq = ch->center_freq;
+ if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+ /* TSF available only in 32-bit */
+ status->mactime = info->tsf & 0xffffffff;
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
+
ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
info->skb,
info->skb->len,
status->flag == 0 ? "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->flag & RX_FLAG_80MHZ ? "80" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
status->freq,
- status->band);
+ status->band, status->flag, info->fcs_err);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
- ath10k_warn("unknown peer id %d\n", ev->peer_id);
+ ath10k_warn("peer-unmap-event: unknown peer id %d\n",
+ ev->peer_id);
goto exit;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 712a606a080a..cb1f7b5bcf4c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -213,7 +213,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
- .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
@@ -420,7 +420,6 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
- .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.dcs = WMI_PDEV_PARAM_DCS,
.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -472,8 +471,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.bcnflt_stats_update_period =
WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
- .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
- .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
.dcs = WMI_10X_PDEV_PARAM_DCS,
.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -561,7 +559,6 @@ err_pull:
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
- struct wmi_bcn_tx_arg arg = {0};
int ret;
lockdep_assert_held(&arvif->ar->data_lock);
@@ -569,18 +566,16 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
if (arvif->beacon == NULL)
return;
- arg.vdev_id = arvif->vdev_id;
- arg.tx_rate = 0;
- arg.tx_power = 0;
- arg.bcn = arvif->beacon->data;
- arg.bcn_len = arvif->beacon->len;
+ if (arvif->beacon_sent)
+ return;
- ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
if (ret)
return;
- dev_kfree_skb_any(arvif->beacon);
- arvif->beacon = NULL;
+ /* We need to retain the arvif->beacon reference for DMA unmapping and
+ * freeing the skbuff later. */
+ arvif->beacon_sent = true;
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
@@ -1116,7 +1111,27 @@ static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+ struct wmi_peer_sta_kickout_event *ev;
+ struct ieee80211_sta *sta;
+
+ ev = (struct wmi_peer_sta_kickout_event *)skb->data;
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ ev->peer_macaddr.addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
+ if (!sta) {
+ ath10k_warn("Spurious quick kickout for STA %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
}
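The kickout handler above looks the station up inside an rcu_read_lock()/rcu_read_unlock() section so the mac80211 sta object cannot go away while ieee80211_report_low_ack() uses it. A rough userspace analogue of that look-up-under-a-read-lock shape, with a pthread rwlock standing in for RCU and a fixed table standing in for the station list (both are illustrative assumptions):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sta {
	char addr[18];
	int low_ack_reports;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct sta stations[] = {
	{ "00:11:22:33:44:55", 0 },
	{ "66:77:88:99:aa:bb", 0 },
};

static void report_kickout(const char *addr)
{
	pthread_rwlock_rdlock(&table_lock);	/* read-side critical section */

	struct sta *found = NULL;
	for (size_t i = 0; i < sizeof(stations) / sizeof(stations[0]); i++)
		if (!strcmp(stations[i].addr, addr))
			found = &stations[i];

	if (!found)
		printf("spurious kickout for %s\n", addr);
	else
		found->low_ack_reports++;	/* ieee80211_report_low_ack(sta, 10) in the driver */

	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	report_kickout("66:77:88:99:aa:bb");
	report_kickout("de:ad:be:ef:00:00");
	return 0;
}
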
/*
@@ -1217,6 +1232,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+ if (tim->dtim_count == 0) {
+ ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+
+ if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
+ ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+ }
+
ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
tim->dtim_count, tim->dtim_period,
tim->bitmap_ctrl, pvm_len);
@@ -1338,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
- int vdev_id = 0;
+ int ret, vdev_id = 0;
ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
@@ -1385,6 +1407,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
+ /* There are no completions for beacons, so wait for the next SWBA
+ * before telling mac80211 to decrement the CSA counter.
+ *
+ * Once the CSA counter is complete, stop sending beacons until the
+ * actual channel switch is done. */
+ if (arvif->vif->csa_active &&
+ ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_csa_finish(arvif->vif);
+ continue;
+ }
+
bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
if (!bcn) {
ath10k_warn("could not get mac80211 beacon\n");
@@ -1396,15 +1429,33 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
spin_lock_bh(&ar->data_lock);
+
if (arvif->beacon) {
- ath10k_warn("SWBA overrun on vdev %d\n",
- arvif->vdev_id);
+ if (!arvif->beacon_sent)
+ ath10k_warn("SWBA overrun on vdev %d\n",
+ arvif->vdev_id);
+
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
}
+ ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
+ bcn->data, bcn->len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev,
+ ATH10K_SKB_CB(bcn)->paddr);
+ if (ret) {
+ ath10k_warn("failed to map beacon: %d\n", ret);
+ goto skip;
+ }
+
arvif->beacon = bcn;
+ arvif->beacon_sent = false;
ath10k_wmi_tx_beacon_nowait(arvif);
+skip:
spin_unlock_bh(&ar->data_lock);
}
}
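The SWBA hunk above pins each new beacon for DMA with dma_map_single(), immediately validates the handle with dma_mapping_error(), and only unmaps and frees the previously mapped beacon on the next SWBA. A small sketch of that map / check / deferred-unmap lifecycle, with map_buffer()/map_failed()/unmap_buffer() as hypothetical stand-ins for the DMA API:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct beacon {
	void *data;
	uintptr_t paddr;	/* stand-in for the DMA address kept in the skb cb */
};

static uintptr_t map_buffer(void *buf)   { return (uintptr_t)buf; }	/* pretend mapping */
static int map_failed(uintptr_t handle)  { return handle == 0; }	/* dma_mapping_error() analogue */
static void unmap_buffer(uintptr_t h)    { (void)h; }

static struct beacon *current_beacon;

static void swba(void *new_data)
{
	/* Release whatever was mapped for the previous interval first. */
	if (current_beacon) {
		unmap_buffer(current_beacon->paddr);
		free(current_beacon);
		current_beacon = NULL;
	}

	struct beacon *bcn = malloc(sizeof(*bcn));
	bcn->data = new_data;
	bcn->paddr = map_buffer(new_data);
	if (map_failed(bcn->paddr)) {
		fprintf(stderr, "failed to map beacon\n");
		free(bcn);
		return;			/* the driver jumps to the skip: label here */
	}
	current_beacon = bcn;		/* kept until the next SWBA */
}

int main(void)
{
	char a[64], b[64];

	swba(a);
	swba(b);
	printf("beacon %p is currently mapped\n", current_beacon->data);
	return 0;
}
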
@@ -2031,11 +2082,11 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
ev->mac_addr.addr,
- __le32_to_cpu(ev->status));
+ __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
complete(&ar->wmi.unified_ready);
return 0;
@@ -2403,7 +2454,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
ar->wmi.cmd->pdev_set_channel_cmdid);
}
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
@@ -2413,7 +2464,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
return -ENOMEM;
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
- cmd->suspend_opt = WMI_PDEV_SUSPEND;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
@@ -3342,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->max_power = ch->max_power;
ci->reg_power = ch->max_reg_power;
ci->antenna_max = ch->max_antenna_gain;
- ci->antenna_max = 0;
/* mode & flags share storage */
ci->mode = ch->mode;
@@ -3411,25 +3461,41 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
- const struct wmi_bcn_tx_arg *arg)
+/* This function assumes the beacon is already DMA mapped */
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
- struct wmi_bcn_tx_cmd *cmd;
+ struct wmi_bcn_tx_ref_cmd *cmd;
struct sk_buff *skb;
+ struct sk_buff *beacon = arvif->beacon;
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hdr *hdr;
int ret;
+ u16 fc;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
- cmd = (struct wmi_bcn_tx_cmd *)skb->data;
- cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
- cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
- cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
- cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
- memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+ hdr = (struct ieee80211_hdr *)beacon->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+ cmd->data_len = __cpu_to_le32(beacon->len);
+ cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
- ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
if (ret)
dev_kfree_skb(skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4b5e7d3d32b6..4fcc96aa9513 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2277,7 +2277,6 @@ struct wmi_pdev_param_map {
u32 bcnflt_stats_update_period;
u32 pmf_qos;
u32 arp_ac_override;
- u32 arpdhcp_ac_override;
u32 dcs;
u32 ani_enable;
u32 ani_poll_period;
@@ -3403,6 +3402,24 @@ struct wmi_bcn_tx_arg {
const void *bcn;
};
+enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+ /* physical address of the frame - dma pointer */
+ __le32 data_ptr;
+ /* id for host to track */
+ __le32 msdu_id;
+ /* frame ctrl to setup PPDU desc */
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
+} __packed;
+
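wmi_bcn_tx_ref_cmd is a fixed-layout, little-endian command: the host fills in the vdev id, the DMA address and length of the already-mapped beacon, the frame control word and the DTIM/CAB flag bits. A host-side sketch of packing such a structure with explicit little-endian conversion; the put_le32() helper and pack_bcn_ref() are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BCN_REF_FLAG_DTIM_ZERO   0x1
#define BCN_REF_FLAG_DELIVER_CAB 0x2

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Mirrors the field order of wmi_bcn_tx_ref_cmd: vdev_id, data_len,
 * data_ptr, msdu_id, frame_control, flags -- six __le32 words. */
static size_t pack_bcn_ref(uint8_t *buf, uint32_t vdev_id, uint32_t len,
			   uint32_t paddr, uint16_t fc, int dtim_zero, int cab)
{
	uint32_t flags = 0;

	if (dtim_zero)
		flags |= BCN_REF_FLAG_DTIM_ZERO;
	if (cab)
		flags |= BCN_REF_FLAG_DELIVER_CAB;

	put_le32(buf +  0, vdev_id);
	put_le32(buf +  4, len);
	put_le32(buf +  8, paddr);
	put_le32(buf + 12, 0);		/* msdu_id, unused here */
	put_le32(buf + 16, fc);
	put_le32(buf + 20, flags);
	return 24;
}

int main(void)
{
	uint8_t cmd[24];
	size_t n = pack_bcn_ref(cmd, 0, 128, 0x1000, 0x0080, 1, 1);

	printf("packed %zu bytes, flags byte = 0x%02x\n", n, cmd[20]);
	return 0;
}
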
/* Beacon filter */
#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
@@ -3859,6 +3876,12 @@ enum wmi_peer_smps_state {
WMI_PEER_SMPS_DYNAMIC = 0x2
};
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
enum wmi_peer_param {
WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
WMI_PEER_AMPDU = 0x2,
@@ -4039,6 +4062,10 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
/* FIXME: empirically extrapolated */
@@ -4172,7 +4199,7 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
@@ -4219,8 +4246,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
- const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ef35da84f63b..4b18434ba697 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -751,6 +751,9 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ah->dev, bf->skbaddr))
+ return -ENOSPC;
+
ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
ARRAY_SIZE(bf->rates));
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 4ee01f654235..afb23b3cc7be 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -681,6 +681,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
survey->channel = conf->chandef.chan;
survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_IN_USE |
SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY |
SURVEY_INFO_CHANNEL_TIME_RX |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fd4c89df67e1..c2c6f4604958 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
if (nw_type & ADHOC_NETWORK) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
if (vif->nw_type & ADHOC_NETWORK) {
- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
- return;
- }
- memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret, rssi_thold;
+ int n_match_sets = request->n_match_sets;
+
+ /*
+ * If there's a matchset w/o an SSID, then assume it's just for
+ * the RSSI (nothing else is currently supported) and ignore it.
+ * The device only supports a global RSSI filter that we set below.
+ */
+ if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+ n_match_sets = 0;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
request->n_ssids,
request->match_sets,
- request->n_match_sets);
+ n_match_sets);
if (ret < 0)
return ret;
- if (!request->n_match_sets) {
+ if (!n_match_sets) {
ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0);
if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
ar->fw_capabilities)) {
- if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
rssi_thold = 0;
- else if (request->rssi_thold < -127)
+ else if (request->min_rssi_thold < -127)
rssi_thold = -127;
else
- rssi_thold = request->rssi_thold;
+ rssi_thold = request->min_rssi_thold;
ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
rssi_thold);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index f38ff6a6255e..56c3fd5cef65 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -24,7 +24,7 @@
/* constants */
#define TX_URB_COUNT 32
#define RX_URB_COUNT 32
-#define ATH6KL_USB_RX_BUFFER_SIZE 1700
+#define ATH6KL_USB_RX_BUFFER_SIZE 4096
/* tx/rx pipes for usb */
enum ATH6KL_USB_PIPE_ID {
@@ -481,8 +481,8 @@ static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
* ATH6KL_USB_RX_BUFFER_SIZE);
*/
- ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
- ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
+ ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
ATH6KL_USB_RX_BUFFER_SIZE);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 4f16d79c9eb1..8b4ce28e3ce8 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -914,7 +914,7 @@ ath6kl_get_regpair(u16 regdmn)
return NULL;
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
- if (regDomainPairs[i].regDmnEnum == regdmn)
+ if (regDomainPairs[i].reg_domain == regdmn)
return &regDomainPairs[i];
}
@@ -954,7 +954,7 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
- regpair->regDmnEnum);
+ regpair->reg_domain);
else
ath6kl_warn("Regpair not found reg_code 0x%0x\n",
reg_code);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b96b3e5712d..8fcc029a76a6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -120,18 +120,6 @@ config ATH9K_WOW
This option enables Wake on Wireless LAN support for certain cards.
Currently, AR9462 is supported.
-config ATH9K_LEGACY_RATE_CONTROL
- bool "Atheros ath9k rate control"
- depends on ATH9K
- default n
- ---help---
- Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht. Be warned that there are various
- issues with the ath9k RC and minstrel is a more robust algorithm.
- Note that even if this option is selected, "ath9k_rate_control"
- has to be passed to mac80211 using the module parameter,
- ieee80211_default_rc_algo.
-
config ATH9K_RFKILL
bool "Atheros ath9k rfkill support" if EXPERT
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index a40e5c5d7418..8e1c7b0fe76c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,6 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
@@ -52,7 +51,9 @@ ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
-ath9k_common-y:= common.o
+ath9k_common-y:= common.o \
+ common-init.o \
+ common-beacon.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2dff2765769b..a0398fe3eb28 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -39,6 +39,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "qca955x_wmac",
.driver_data = AR9300_DEVID_QCA955X,
},
+ {
+ .name = "qca953x_wmac",
+ .driver_data = AR9300_DEVID_AR953X,
+ },
{},
};
@@ -82,6 +86,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int irq;
int ret = 0;
struct ath_hw *ah;
+ struct ath_common *common;
char hw_name[64];
if (!dev_get_platdata(&pdev->dev)) {
@@ -124,9 +129,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- /* Will be cleared in ath9k_start() */
- set_bit(SC_OP_INVALID, &sc->sc_flags);
-
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@@ -144,6 +146,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)mem, irq);
+ common = ath9k_hw_common(sc->sc_ah);
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
return 0;
err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index d28923b7435b..6d47783f2e5b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -176,16 +176,26 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
if (ah->opmode == NL80211_IFTYPE_STATION &&
BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
-
/*
- * OFDM Weak signal detection is always enabled for AP mode.
+ * Newer chipsets are better at dealing with high PHY error counts -
+ * keep weak signal detection enabled when no RSSI threshold is
+ * available to determine if it is needed (mode != STA)
*/
- if (ah->opmode != NL80211_IFTYPE_AP &&
- aniState->ofdmWeakSigDetect != weak_sig) {
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- entry_ofdm->ofdm_weak_signal_on);
- }
+ else if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_STATION)
+ weak_sig = true;
+
+ /* Older chipsets are more sensitive to high PHY error counts */
+ else if (!AR_SREV_9300_20_OR_LATER(ah) &&
+ aniState->ofdmNoiseImmunityLevel >= 8)
+ weak_sig = false;
+
+ if (aniState->ofdmWeakSigDetect != weak_sig)
+ ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ weak_sig);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return;
if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -308,17 +318,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
BUG_ON(aniState == NULL);
ah->stats.ast_ani_reset++;
- /* only allow a subset of functions in AP mode */
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (IS_CHAN_2GHZ(chan)) {
- ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
- ATH9K_ANI_FIRSTEP_LEVEL);
- if (AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function |= ATH9K_ANI_MRC_CCK;
- } else
- ah->ani_function = 0;
- }
-
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
@@ -483,10 +482,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_dbg(common, ANI, "Initialize ANI\n");
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+ }
ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 21e7b83c3f6a..c40965b4c1e2 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -22,12 +22,16 @@
/* units are errors per second */
#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
#define ATH9K_ANI_CCK_TRIG_HIGH 600
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
#define ATH9K_ANI_CCK_TRIG_LOW 300
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
#define ATH9K_ANI_FIRSTEP_LVL 2
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ff415e863ee9..3b3e91057a4c 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -26,10 +26,6 @@ static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
-static const int cycpwrThr1_table[] =
-/* level: 0 1 2 3 4 5 6 7 8 */
- { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
-
/*
* register values to turn OFDM weak signal detection OFF
*/
@@ -921,7 +917,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &ah->ani;
- s32 value, value2;
+ s32 value;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
@@ -1008,42 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
- if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ANI,
- "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(firstep_table));
- return false;
- }
-
- /*
- * make register setting relative to default
- * from INI file & cap value
- */
- value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
- aniState->iniDef.firstep;
- if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
- value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
- if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
- value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+ value = level * 2;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- value);
- /*
- * we need to set first step low register too
- * make register setting relative to default
- * from INI file & cap value
- */
- value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
- aniState->iniDef.firstepLow;
- if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
- value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
- if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
- value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
-
+ AR_PHY_FIND_SIG_FIRSTEP, value);
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
- AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+ AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
@@ -1060,7 +1025,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL,
- value2,
+ value,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
@@ -1073,41 +1038,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
- if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ANI,
- "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(cycpwrThr1_table));
- return false;
- }
- /*
- * make register setting relative to default
- * from INI file & cap value
- */
- value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
- aniState->iniDef.cycpwrThr1;
- if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
- value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
- if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
- value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ value = (level + 1) * 2;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- value);
+ AR_PHY_TIMING5_CYCPWR_THR1, value);
- /*
- * set AR_PHY_EXT_CCA for extension channel
- * make register setting relative to default
- * from INI file & cap value
- */
- value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
- aniState->iniDef.cycpwrThr1Ext;
- if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
- value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
- if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
- value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
- REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
- AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+ if (IS_CHAN_HT40(ah->curchan))
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1, value);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
@@ -1124,7 +1061,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL,
- value2,
+ value,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
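Both hunks above replace table-plus-INI-relative lookups with a direct mapping from the ANI level to the register value: the firstep field becomes level * 2 and the cycpwr_thr1 field becomes (level + 1) * 2, with the HT40 extension register written only when a 40 MHz channel is in use. A tiny sketch of those two mappings, evaluated at the driver defaults ATH9K_ANI_FIRSTEP_LVL = 2 and ATH9K_ANI_SPUR_IMMUNE_LVL = 3:

#include <stdio.h>

/* Direct level-to-register mappings used by ar5008_hw_ani_control_new()
 * after this change. */
static int firstep_value(unsigned int level)     { return level * 2; }
static int cycpwr_thr1_value(unsigned int level) { return (level + 1) * 2; }

int main(void)
{
	printf("default firstep level 2 -> FIND_SIG_FIRSTEP = %d\n",
	       firstep_value(2));			/* 4 */
	printf("default spur immunity level 3 -> CYCPWR_THR1 = %d\n",
	       cycpwr_thr1_value(3));			/* 8 */
	return 0;
}
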
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a352128c40ad..ac8301ef5242 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -23,10 +23,11 @@
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
+#define MAXIQCAL 3
struct coeff {
- int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
- int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int iqc_coeff[2];
};
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x1000)
- i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
-
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return true;
}
-static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+ int nmeasurement,
int max_delta)
{
int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
- if (mp_coeff[i] > mp_max) {
- mp_max = mp_coeff[i];
+ if (mp_coeff[i][0] > mp_max) {
+ mp_max = mp_coeff[i][0];
max_idx = i;
- } else if (mp_coeff[i] < mp_min) {
- mp_min = mp_coeff[i];
+ } else if (mp_coeff[i][0] < mp_min) {
+ mp_min = mp_coeff[i][0];
min_idx = i;
}
}
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
- if ((abs(mp_coeff[i]) < abs(mp_max)) ||
- (abs(mp_coeff[i]) < abs(mp_min))) {
- mp_avg += mp_coeff[i];
+ if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+ (abs(mp_coeff[i][0]) < abs(mp_min))) {
+ mp_avg += mp_coeff[i][0];
mp_count++;
}
}
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
if (mp_count)
mp_avg /= mp_count;
else
- mp_avg = mp_coeff[nmeasurement - 1];
+ mp_avg = mp_coeff[nmeasurement - 1][0];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
else
outlier_idx = min_idx;
- mp_coeff[outlier_idx] = mp_avg;
+ mp_coeff[outlier_idx][0] = mp_avg;
}
}
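ar9003_hw_detect_outlier() keeps its shape here: find the minimum and maximum mismatch, average the remaining samples, and if the min-max spread exceeds max_delta overwrite whichever extreme has the larger magnitude with that average. A self-contained sketch of the same rule on a flat int array (the driver indexes [gain][iteration] instead, and its values stay within the 7-bit range):

#include <stdio.h>
#include <stdlib.h>

static void detect_outlier(int *v, int n, int max_delta)
{
	int max = -64, min = 63, max_idx = 0, min_idx = 0;
	int avg = 0, count = 0, outlier, i;

	for (i = 0; i < n; i++) {
		if (v[i] > max) {
			max = v[i];
			max_idx = i;
		} else if (v[i] < min) {
			min = v[i];
			min_idx = i;
		}
	}

	/* average everything except the extreme values */
	for (i = 0; i < n; i++)
		if (abs(v[i]) < abs(max) || abs(v[i]) < abs(min)) {
			avg += v[i];
			count++;
		}

	avg = count ? avg / count : v[n - 1];

	if (abs(max - min) > max_delta) {
		outlier = (abs(max) > abs(min)) ? max_idx : min_idx;
		v[outlier] = avg;
	}
}

int main(void)
{
	int samples[] = { 3, 4, 5, 40, 2 };
	int i;

	detect_outlier(samples, 5, 11);		/* 11 matches MAX_MAG_DELTA */
	for (i = 0; i < 5; i++)
		printf("%d ", samples[i]);	/* 40 is replaced by the average */
	printf("\n");
	return 0;
}
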
-static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- struct coeff *coeff,
- bool is_reusable)
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+ struct coeff *coeff,
+ bool is_reusable)
{
int i, im, nmeasurement;
+ int magnitude, phase;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
struct ath9k_hw_cal_data *caldata = ah->caldata;
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
- /* detect outlier only if nmeasurement > 1 */
- if (nmeasurement > 1) {
- /* Detect magnitude outlier */
- ar9003_hw_detect_outlier(coeff->mag_coeff[i],
- nmeasurement, MAX_MAG_DELTA);
-
- /* Detect phase outlier */
- ar9003_hw_detect_outlier(coeff->phs_coeff[i],
- nmeasurement, MAX_PHS_DELTA);
+ /*
+ * Skip normal outlier detection for AR9550.
+ */
+ if (!AR_SREV_9550(ah)) {
+ /* detect outlier only if nmeasurement > 1 */
+ if (nmeasurement > 1) {
+ /* Detect magnitude outlier */
+ ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+ nmeasurement,
+ MAX_MAG_DELTA);
+
+ /* Detect phase outlier */
+ ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+ nmeasurement,
+ MAX_PHS_DELTA);
+ }
}
for (im = 0; im < nmeasurement; im++) {
+ magnitude = coeff->mag_coeff[i][im][0];
+ phase = coeff->phs_coeff[i][im][0];
- coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
- ((coeff->phs_coeff[i][im] & 0x7f) << 7);
+ coeff->iqc_coeff[0] =
+ (phase & 0x7f) | ((magnitude & 0x7f) << 7);
if ((im % 2) == 0)
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+ struct coeff *coeff,
+ int i, int nmeasurement)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int im, ix, iy, temp;
+
+ for (im = 0; im < nmeasurement; im++) {
+ for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+ for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+ if (coeff->mag_coeff[i][im][iy] <
+ coeff->mag_coeff[i][im][ix]) {
+ temp = coeff->mag_coeff[i][im][ix];
+ coeff->mag_coeff[i][im][ix] =
+ coeff->mag_coeff[i][im][iy];
+ coeff->mag_coeff[i][im][iy] = temp;
+ }
+ if (coeff->phs_coeff[i][im][iy] <
+ coeff->phs_coeff[i][im][ix]) {
+ temp = coeff->phs_coeff[i][im][ix];
+ coeff->phs_coeff[i][im][ix] =
+ coeff->phs_coeff[i][im][iy];
+ coeff->phs_coeff[i][im][iy] = temp;
+ }
+ }
+ }
+ coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+ coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+ ath_dbg(common, CALIBRATE,
+ "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+ i, im,
+ coeff->mag_coeff[i][im][0],
+ coeff->phs_coeff[i][im][0]);
+ }
+}
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+ struct coeff *coeff,
+ int iqcal_idx,
+ int nmeasurement)
+{
+ int i;
+
+ if ((iqcal_idx + 1) != MAXIQCAL)
+ return false;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+ }
+
+ return true;
+}
+
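__ar955x_tx_iq_cal_sort() simply sorts the MAXIQCAL (= 3) magnitude and phase samples collected for each gain entry and keeps the middle one, so a single noisy calibration pass cannot dominate the result. The same median-of-three idea in a few lines of standalone C:

#include <stdio.h>

#define MAXIQCAL 3

static int median3(int s[MAXIQCAL])
{
	int ix, iy, tmp;

	/* small exchange sort, same loop structure as the hunk above */
	for (ix = 0; ix < MAXIQCAL - 1; ix++)
		for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++)
			if (s[iy] < s[ix]) {
				tmp = s[ix];
				s[ix] = s[iy];
				s[iy] = tmp;
			}

	return s[MAXIQCAL / 2];		/* middle element is the median */
}

int main(void)
{
	int mag[MAXIQCAL] = { 17, 3, 9 };

	printf("median magnitude = %d\n", median3(mag));	/* prints 9 */
	return 0;
}
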
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+ int iqcal_idx,
+ bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
- struct coeff coeff;
+ static struct coeff coeff;
s32 iq_res[6];
int i, im, j;
- int nmeasurement;
+ int nmeasurement = 0;
+ bool outlier_detect = true;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
goto tx_iqcal_fail;
}
- coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
- coeff.phs_coeff[i][im] =
+ coeff.phs_coeff[i][im][iqcal_idx] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.mag_coeff[i][im][iqcal_idx] =
(coeff.iqc_coeff[0] >> 7) & 0x7f;
- if (coeff.mag_coeff[i][im] > 63)
- coeff.mag_coeff[i][im] -= 128;
- if (coeff.phs_coeff[i][im] > 63)
- coeff.phs_coeff[i][im] -= 128;
+ if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+ coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+ if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+ coeff.phs_coeff[i][im][iqcal_idx] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
+
+ if (AR_SREV_9550(ah))
+ outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+ iqcal_idx, nmeasurement);
+ if (outlier_detect)
+ ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
return;
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
}
if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
return true;
}
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool status;
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms,"
+ "noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ return true;
+}
+
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
- bool is_reusable = true, status = true;
+ bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
+ int i = 0;
/* Use chip chainmask only for calibration */
ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- txiqcal_done = true;
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+ txiqcal_done = true;
+ } else {
+ txiqcal_done = false;
+ }
run_agc_cal = true;
} else {
sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
if (AR_SREV_9330_11(ah))
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT);
- }
+ /*
+ * For non-AR9550 chips, we just trigger AGC calibration
+ * in the HW, poll for completion and then process
+ * the results.
+ *
+ * For AR955x, we run it multiple times and use
+ * median IQ correction.
+ */
+ if (!AR_SREV_9550(ah)) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
- if (!status) {
- ath_dbg(common, CALIBRATE,
- "offset calibration failed to complete in %d ms; noisy environment?\n",
- AH_WAIT_TIMEOUT / 1000);
- return false;
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+ } else {
+ if (!txiqcal_done) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ } else {
+ for (i = 0; i < MAXIQCAL; i++) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+ }
+ }
+ }
}
- if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
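For AR955x the init-cal path above now runs the AGC calibration MAXIQCAL times and hands every pass to ar9003_hw_tx_iq_cal_post_proc(); the median selection only commits coefficients once the final pass has been collected. A control-flow sketch of that loop, with run_agc_cal() and post_process() as stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MAXIQCAL 3

static bool run_agc_cal(void)
{
	return true;	/* pretend the hardware poll succeeded */
}

/* Collect one pass; only the last pass (idx + 1 == MAXIQCAL) triggers the
 * median selection and the register writes in the driver. */
static void post_process(int idx)
{
	bool commit = (idx + 1 == MAXIQCAL);

	printf("pass %d collected%s\n", idx, commit ? ", committing median" : "");
}

int main(void)
{
	for (int i = 0; i < MAXIQCAL; i++) {
		if (!run_agc_cal())
			return 1;
		post_process(i);
	}
	return 0;
}
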
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b8daff78b9d1..235053ba7737 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -23,8 +23,8 @@
#define COMP_HDR_LEN 4
#define COMP_CKSUM_LEN 2
-#define LE16(x) __constant_cpu_to_le16(x)
-#define LE32(x) __constant_cpu_to_le32(x)
+#define LE16(x) cpu_to_le16(x)
+#define LE32(x) cpu_to_le32(x)
/* Local defines to distinguish between extension and control CTL's */
#define EXT_ADDITIVE (0x8000)
@@ -4792,43 +4792,54 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
tempslope:
if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+ u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
+
/*
* AR955x has tempSlope register for each chain.
* Check whether temp_compensation feature is enabled or not.
*/
if (eep->baseEepHeader.featureEnable & 0x1) {
if (frequency < 4000) {
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM,
- eep->base_ext2.tempSlopeLow);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM,
- eep->base_ext2.tempSlopeHigh);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeLow);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeHigh);
} else {
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope1);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM,
- temp_slope2);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope1);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope2);
}
} else {
/*
* If temp compensation is not enabled,
* set all registers to 0.
*/
- REG_RMW_FIELD(ah, AR_PHY_TPC_19,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
- REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
- AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(0))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ if (txmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
}
} else {
REG_RMW_FIELD(ah, AR_PHY_TPC_19,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 09facba1dc6d..8927fc34d84c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -868,10 +868,6 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
- if (IS_CHAN_QUARTER_RATE(chan))
- rfMode |= AR_PHY_MODE_QUARTER;
- if (IS_CHAN_HALF_RATE(chan))
- rfMode |= AR_PHY_MODE_HALF;
if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b5ac32cfbeb8..44d74495c4de 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -30,7 +30,6 @@
#include "spectral.h"
struct ath_node;
-struct ath_rate_table;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
+enum {
+ WLAN_RC_PHY_OFDM,
+ WLAN_RC_PHY_CCK,
+};
+
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -399,21 +403,10 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
#define ATH_BCBUF 8
#define ATH_DEFAULT_BINTVAL 100 /* TU */
#define ATH_DEFAULT_BMISS_LIMIT 10
-#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
-struct ath_beacon_config {
- int beacon_interval;
- u16 listen_interval;
- u16 dtim_period;
- u16 bmiss_timeout;
- u8 dtim_count;
- bool enable_beacon;
- bool ibss_creator;
-};
-
struct ath_beacon {
enum {
OK, /* no change needed */
@@ -423,11 +416,9 @@ struct ath_beacon {
u32 beaconq;
u32 bmisscnt;
- u32 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
int slottime;
int slotupdate;
- struct ath9k_tx_queue_info beacon_qi;
struct ath_descdma bdma;
struct ath_txq *cabq;
struct list_head bbuf;
@@ -442,7 +433,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
-bool ath9k_csa_is_finished(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -693,15 +685,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define MAX_GTT_CNT 5
-enum sc_op_flags {
- SC_OP_INVALID,
- SC_OP_BEACONS,
- SC_OP_ANI_RUN,
- SC_OP_PRIM_STA_VIF,
- SC_OP_HW_RESET,
- SC_OP_SCANNING,
-};
-
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
#define PS_WAIT_FOR_CAB BIT(1)
@@ -731,7 +714,6 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
- unsigned long sc_flags;
unsigned long driver_data;
u8 gtt_cnt;
@@ -748,7 +730,6 @@ struct ath_softc {
struct ath_rx rx;
struct ath_tx tx;
struct ath_beacon beacon;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
#ifdef CONFIG_MAC80211_LEDS
bool led_registered;
@@ -757,7 +738,6 @@ struct ath_softc {
#endif
struct ath9k_hw_cal_data caldata;
- int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
@@ -774,7 +754,6 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
- struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2e8bba0eb361..471e0f624e81 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -80,7 +80,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
u8 chainmask = ah->txchainmask;
u8 rate = 0;
- sband = &sc->sbands[common->hw->conf.chandef.chan->band];
+ sband = &common->sbands[common->hw->conf.chandef.chan->band];
rate = sband->bitrates[rateidx].hw_value;
if (vif->bss_conf.use_short_preamble)
rate |= sband->bitrates[rateidx].hw_value_short;
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
-bool ath9k_csa_is_finished(struct ath_softc *sc)
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif;
-
- vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
return false;
ieee80211_csa_finish(vif);
-
- sc->csa_vif = NULL;
return true;
}
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+ ieee80211_iterate_active_interfaces(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif,
+ sc);
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -319,7 +328,7 @@ void ath9k_beacon_tasklet(unsigned long data)
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath_dbg(common, RESET,
"reset work is pending, skip beaconing now\n");
return;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
- /* EDMA devices check that in the tx completion function. */
- if (!edma && ath9k_csa_is_finished(sc))
- return;
-
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc, vif))
+ return;
+
if (!vif || !vif->bss_conf.enable_beacon)
return;
@@ -438,33 +447,6 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
ath9k_hw_enable_interrupts(ah);
}
-/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
-static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
-{
- u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
-
- tsf_mod = tsf & (BIT(10) - 1);
- tsf_hi = tsf >> 32;
- tsf_lo = ((u32) tsf) >> 10;
-
- mod_hi = tsf_hi % div_tu;
- mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
-
- return (mod_lo << 10) | tsf_mod;
-}
-
-static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
- unsigned int interval)
-{
- struct ath_hw *ah = sc->sc_ah;
- unsigned int offset;
-
- tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
- offset = ath9k_mod_tsf64_tu(tsf, interval);
-
- return (u32) tsf + TU_TO_USEC(interval) - offset;
-}
-
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -474,115 +456,18 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 nexttbtt, intval;
-
- /* NB: the beacon interval is kept internally in TU's */
- intval = TU_TO_USEC(conf->beacon_interval);
- intval /= ATH_BCBUF;
- nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
- conf->beacon_interval);
-
- if (conf->enable_beacon)
- ah->imask |= ATH9K_INT_SWBA;
- else
- ah->imask &= ~ATH9K_INT_SWBA;
-
- ath_dbg(common, BEACON,
- "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
- (conf->enable_beacon) ? "Enable" : "Disable",
- nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval, false);
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
}
-/*
- * This sets up the beacon timers according to the timestamp of the last
- * received beacon and the current TSF, configures PCF and DTIM
- * handling, programs the sleep registers so the hardware will wakeup in
- * time to receive beacons, and configures the beacon miss handling so
- * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
- * we've associated with.
- */
-static void ath9k_beacon_config_sta(struct ath_softc *sc,
+static void ath9k_beacon_config_sta(struct ath_hw *ah,
struct ath_beacon_config *conf)
{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
- int dtim_intval, sleepduration;
- u32 nexttbtt = 0, intval;
- u64 tsf;
- /* No need to configure beacon if we are not associated */
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
- ath_dbg(common, BEACON,
- "STA is not yet associated..skipping beacon config\n");
+ if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
return;
- }
-
- memset(&bs, 0, sizeof(bs));
- intval = conf->beacon_interval;
-
- /*
- * Setup dtim parameters according to
- * last beacon we received (which may be none).
- */
- dtim_intval = intval * conf->dtim_period;
- sleepduration = conf->listen_interval * intval;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim state for the result.
- */
- tsf = ath9k_hw_gettsf64(ah);
- nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
-
- bs.bs_intval = TU_TO_USEC(intval);
- bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
- bs.bs_nexttbtt = nexttbtt;
- bs.bs_nextdtim = nexttbtt;
- if (conf->dtim_period > 1)
- bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
-
- /*
- * Calculate the number of consecutive beacons to miss* before taking
- * a BMISS interrupt. The configuration is specified in TU so we only
- * need calculate based on the beacon interval. Note that we clamp the
- * result to at most 15 beacons.
- */
- if (sleepduration > intval) {
- bs.bs_bmissthreshold = conf->listen_interval *
- ATH_DEFAULT_BMISS_LIMIT / 2;
- } else {
- bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval);
- if (bs.bs_bmissthreshold > 15)
- bs.bs_bmissthreshold = 15;
- else if (bs.bs_bmissthreshold <= 0)
- bs.bs_bmissthreshold = 1;
- }
-
- /*
- * Calculate sleep duration. The configuration is given in ms.
- * We ensure a multiple of the beacon period is used. Also, if the sleep
- * duration is greater than the DTIM period then it makes senses
- * to make it a multiple of that.
- *
- * XXX fixed at 100ms
- */
-
- bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
- sleepduration));
- if (bs.bs_sleepduration > bs.bs_dtimperiod)
- bs.bs_sleepduration = bs.bs_dtimperiod;
-
- /* TSF out of range threshold fixed at 1 second */
- bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
- ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration);
-
- /* Set the computed STA beacon timers */
ath9k_hw_disable_interrupts(ah);
ath9k_hw_set_sta_beacon_timers(ah, &bs);
@@ -597,36 +482,19 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 intval, nexttbtt;
ath9k_reset_beacon_status(sc);
- intval = TU_TO_USEC(conf->beacon_interval);
-
- if (conf->ibss_creator)
- nexttbtt = intval;
- else
- nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
- conf->beacon_interval);
-
- if (conf->enable_beacon)
- ah->imask |= ATH9K_INT_SWBA;
- else
- ah->imask &= ~ATH9K_INT_SWBA;
-
- ath_dbg(common, BEACON,
- "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
- (conf->enable_beacon) ? "Enable" : "Disable",
- nexttbtt, intval, conf->beacon_interval);
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
- ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
/*
* Set the global 'beacon has been configured' flag for the
* joiner case in IBSS mode.
*/
if (!conf->ibss_creator && conf->enable_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
}
static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@@ -646,7 +514,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
if ((vif->type == NL80211_IFTYPE_STATION) &&
- test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
+ test_bit(ATH_OP_BEACONS, &common->op_flags) &&
!avp->primary_sta_vif) {
ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
@@ -668,7 +536,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
- cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
cur_conf->ibss_creator = bss_conf->ibss_creator;
cur_conf->bmiss_timeout =
@@ -698,6 +565,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
unsigned long flags;
bool skip_beacon = false;
@@ -710,7 +579,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
return;
}
@@ -749,13 +618,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
}
/*
- * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+ * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
* here, it is done in ath9k_beacon_config_adhoc().
*/
if (cur_conf->enable_beacon && !skip_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ set_bit(ATH_OP_BEACONS, &common->op_flags);
else
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
}
@@ -773,7 +642,7 @@ void ath9k_set_beacon(struct ath_softc *sc)
ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
- ath9k_beacon_config_sta(sc, cur_conf);
+ ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
break;
default:
ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
new file mode 100644
index 000000000000..775d1d20ce0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+#define FUDGE 2
+
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+ u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+ tsf_mod = tsf & (BIT(10) - 1);
+ tsf_hi = tsf >> 32;
+ tsf_lo = ((u32) tsf) >> 10;
+
+ mod_hi = tsf_hi % div_tu;
+ mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+ return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
+ unsigned int interval)
+{
+ unsigned int offset;
+
+ tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+ offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+ return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
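For reference, a minimal user-space sketch of the split-modulo trick used by ath9k_mod_tsf64_tu() above, checked against a plain 64-bit modulo. This is hypothetical illustration only (standard C types, a made-up main()), not part of the patch; it holds for TU divisors up to 1024, which covers normal beacon intervals.

/* Illustrative only -- not part of the ath9k patch. */
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ath9k_mod_tsf64_tu(), with standard types. */
static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
{
	uint32_t tsf_mod = tsf & (1024 - 1);	/* microseconds within a TU */
	uint32_t tsf_hi = tsf >> 32;
	uint32_t tsf_lo = (uint32_t)tsf >> 10;	/* low-order TUs */
	uint32_t mod_hi = tsf_hi % div_tu;
	uint32_t mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;

	return (mod_lo << 10) | tsf_mod;
}

int main(void)
{
	uint64_t tsf = 0x0123456789abcdefULL;	/* arbitrary TSF in usecs */
	uint32_t intval_tu = 100;		/* typical beacon interval */
	uint32_t split = mod_tsf64_tu(tsf, intval_tu);
	uint64_t direct = tsf % ((uint64_t)intval_tu << 10); /* TU -> usecs */

	printf("split=%u direct=%llu\n", split, (unsigned long long)direct);
	return split == direct ? 0 : 1;
}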
+/*
+ * This sets up the beacon timers according to the timestamp of the last
+ * received beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware will wakeup in
+ * time to receive beacons, and configures the beacon miss handling so
+ * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int dtim_intval;
+ u64 tsf;
+
+ /* No need to configure beacon if we are not associated */
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+ ath_dbg(common, BEACON,
+ "STA is not yet associated; skipping beacon config\n");
+ return -EPERM;
+ }
+
+ memset(bs, 0, sizeof(*bs));
+ conf->intval = conf->beacon_interval;
+
+ /*
+ * Setup dtim parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtim_intval = conf->intval * conf->dtim_period;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
+
+ bs->bs_intval = TU_TO_USEC(conf->intval);
+ bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
+ bs->bs_nexttbtt = conf->nexttbtt;
+ bs->bs_nextdtim = conf->nexttbtt;
+ if (conf->dtim_period > 1)
+ bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU so we only
+ * need to calculate based on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
+ if (bs->bs_bmissthreshold > 15)
+ bs->bs_bmissthreshold = 15;
+ else if (bs->bs_bmissthreshold <= 0)
+ bs->bs_bmissthreshold = 1;
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+ conf->intval));
+ if (bs->bs_sleepduration > bs->bs_dtimperiod)
+ bs->bs_sleepduration = bs->bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+ bs->bs_bmissthreshold, bs->bs_sleepduration);
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
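A standalone sketch of the bmiss-threshold clamp and sleep-duration rounding above, using a typical 100 TU beacon interval. Hypothetical user-space program, not part of the patch; the kernel macros are re-declared locally for illustration.

/* Illustrative only -- not part of the ath9k patch. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		(DIV_ROUND_UP(x, y) * (y))
#define TU_TO_USEC(x)		((x) << 10)		/* 1 TU = 1024 us */
#define MS_TO_TU(x)		(((x) * 1000) / 1024)

int main(void)
{
	unsigned int intval = 100;		/* beacon interval in TU */
	unsigned int bmiss_timeout = 10 * intval; /* ATH_DEFAULT_BMISS_LIMIT beacons */
	unsigned int thresh = DIV_ROUND_UP(bmiss_timeout, intval);
	unsigned int sleep_us;

	if (thresh > 15)			/* clamp to at most 15 beacons */
		thresh = 15;
	else if (thresh == 0)
		thresh = 1;

	/* Sleep a whole multiple of the beacon period, starting from ~100 ms. */
	sleep_us = TU_TO_USEC(roundup(MS_TO_TU(100), intval));

	/* Prints: bmiss threshold: 10 beacons, sleep: 102400 us */
	printf("bmiss threshold: %u beacons, sleep: %u us\n", thresh, sleep_us);
	return 0;
}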
+
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+
+ if (conf->ibss_creator)
+ conf->nexttbtt = conf->intval;
+ else
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
+
+/*
+ * For multi-bss ap support beacons are either staggered evenly over N slots or
+ * burst together. For the former arrange for the SWBA to be delivered for each
+ * slot. Slots that are not occupied will generate nothing.
+ */
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* NB: the beacon interval is kept internally in TU's */
+ conf->intval = TU_TO_USEC(conf->beacon_interval);
+ conf->intval /= bc_buf;
+ conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+ conf->beacon_interval);
+
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
+
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
+ conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);
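As a quick worked example of the staggering above (illustrative arithmetic, not part of the patch): with conf->beacon_interval = 100 TU and bc_buf = ATH_BCBUF = 8, conf->intval becomes TU_TO_USEC(100) / 8 = 102400 / 8 = 12800 us, so the hardware raises an SWBA roughly every 12.8 ms and each of the eight beacon slots is served once per 102.4 ms beacon period; unoccupied slots simply generate nothing.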
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.h b/drivers/net/wireless/ath/ath9k/common-beacon.h
new file mode 100644
index 000000000000..3665d27f0dc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct ath_beacon_config;
+
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ struct ath9k_beacon_state *bs);
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+ struct ath_beacon_config *conf);
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+ struct ath_beacon_config *conf,
+ unsigned int bc_buf);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
new file mode 100644
index 000000000000..a006c1499728
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* We use the hw_value as an index into our private channel structure */
+
+#include "common.h"
+
+#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable from 2312-2732
+ * in 5 MHz steps; we keep this table static and only list
+ * the channels we know we have calibration data for on
+ * all cards. */
+static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable from XXXX-YYYY
+ * in 5 MHz steps; we keep this table static and only list
+ * the channels we know we have calibration data for on
+ * all cards. */
+static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+};
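A concrete example of SHPCHECK() above (illustrative, not part of the patch): the 2 Mb/s CCK entry is declared as RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), so its hw_value_short comes out as 0x1a | 0x04 = 0x1e; this is the rate code that ath9k_beacon_setup() ORs into the beacon rate via sband->bitrates[rateidx].hw_value_short when the BSS uses short preambles.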
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common)
+{
+ struct ath_hw *ah = (struct ath_hw *)common->ah;
+ void *channels;
+
+ BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
+ ARRAY_SIZE(ath9k_5ghz_chantable) !=
+ ATH9K_NUM_CHANNELS);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable));
+ common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
+ channels = devm_kzalloc(ah->dev,
+ sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ memcpy(channels, ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable));
+ common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
+ common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_init_channels_rates);
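Worth noting for the 5 GHz branch above (illustrative remark, not part of the patch): pointing bitrates at ath9k_legacy_rates + 4 with ARRAY_SIZE(ath9k_legacy_rates) - 4 entries simply skips the four CCK entries (1, 2, 5.5 and 11 Mb/s) at the head of the table, so only the eight OFDM rates are advertised on 5 GHz.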
+
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 tx_streams, rx_streams;
+ int i, max_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ if (AR_SREV_9271(ah) || AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
+ max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
+ rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
+
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+
+ if (tx_streams != rx_streams) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+EXPORT_SYMBOL(ath9k_cmn_setup_ht_cap);
+
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_HT))
+ return;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ ath9k_cmn_setup_ht_cap(ah,
+ &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+}
+EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.h b/drivers/net/wireless/ath/ath9k/common-init.h
new file mode 100644
index 000000000000..ac03fca5ffdd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-init.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common);
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+ struct ieee80211_sta_ht_cap *ht_info);
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 768c733cad31..c6dd7f1fed65 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter)
+{
+ struct ath_hw *ah = common->ah;
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ ieee80211_has_protected(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
+
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+ *decrypt_error = true;
+ mic_error = false;
+ }
+
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
+ }
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
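A tiny user-space sketch of the padding rule above, hypothetical and not part of the patch, showing the pad sizes produced for the usual even 802.11 header lengths:

/* Illustrative only -- not part of the ath9k patch. */
#include <stdio.h>

int main(void)
{
	/* 802.11 header lengths in bytes: data, QoS data, 4-address QoS data */
	unsigned int hdrlens[] = { 24, 26, 32 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int padpos = hdrlens[i];
		/* same result as (4 - padpos % 4) % 4 for even header lengths */
		unsigned int padsize = padpos & 3;

		printf("hdrlen %2u -> %u pad byte(s)\n", padpos, padsize);
	}
	return 0;
}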
+
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_hw *ah = common->ah;
+
+ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ rxs->flag |= rx_stats->flag;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
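Two illustrative lookups for ath9k_cmn_process_rate() above (not from the patch): a descriptor rate of 0x8f has bit 0x80 set, so the frame is reported as HT with rate_idx = 0x8f & 0x7f = 15; a legacy rate of 0x1e matches no hw_value in the table but does match the hw_value_short of the 2 Mb/s entry, so the frame gets rate_idx 1 with RX_FLAG_SHORTPRE set.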
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ath_hw *ah = common->ah;
+ int last_rssi;
+ int rssi = rx_stats->rs_rssi;
+ int i, j;
+
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ /*
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
+ */
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
+ /*
+ * Update Beacon RSSI, this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+ last_rssi = common->last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
+
+ ah->stats.avgbrssi = rssi;
+ }
+
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index eb85e1bdca88..ca38116838f0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -21,6 +21,9 @@
#include "hw.h"
#include "hw-ops.h"
+#include "common-init.h"
+#include "common-beacon.h"
+
/* Common header for Atheros 802.11n base driver cores */
#define WME_BA_BMP_SIZE 64
@@ -42,6 +45,38 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+
+struct ath_beacon_config {
+ int beacon_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+ bool enable_beacon;
+ bool ibss_creator;
+ u32 nexttbtt;
+ u32 intval;
+};
+
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ab7264c1d8f7..780ff1bee6f6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -135,46 +135,45 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
char *buf;
+ int i;
+ struct {
+ const char *name;
+ unsigned int val;
+ } ani_info[] = {
+ { "ANI RESET", ah->stats.ast_ani_reset },
+ { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
+ { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
+ { "SPUR UP", ah->stats.ast_ani_spurup },
+ { "SPUR DOWN", ah->stats.ast_ani_spurup },
+ { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
+ { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
+ { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
+ { "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
+ { "FIR-STEP UP", ah->stats.ast_ani_stepup },
+ { "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
+ { "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
+ { "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
+ { "CCK ERRORS", ah->stats.ast_ani_cckerrs },
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- if (common->disable_ani) {
- len += scnprintf(buf + len, size - len, "%s: %s\n",
- "ANI", "DISABLED");
+ len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
+ common->disable_ani ? "DISABLED" : "ENABLED");
+
+ if (common->disable_ani)
goto exit;
- }
- len += scnprintf(buf + len, size - len, "%15s: %s\n",
- "ANI", "ENABLED");
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "ANI RESET", ah->stats.ast_ani_reset);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR UP", ah->stats.ast_ani_spurup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "SPUR DOWN", ah->stats.ast_ani_spurup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK ON", ah->stats.ast_ani_ccklow);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP UP", ah->stats.ast_ani_stepup);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
- len += scnprintf(buf + len, size - len, "%15s: %u\n",
- "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+ for (i = 0; i < ARRAY_SIZE(ani_info); i++)
+ len += scnprintf(buf + len, size - len, "%15s: %u\n",
+ ani_info[i].name, ani_info[i].val);
+
exit:
if (len > size)
len = size;
@@ -209,7 +208,7 @@ static ssize_t write_file_ani(struct file *file,
common->disable_ani = !ani;
if (common->disable_ani) {
- clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
} else {
ath_check_ani(sc);
@@ -307,13 +306,13 @@ static ssize_t read_file_antenna_diversity(struct file *file,
struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
struct ath_hw_antcomb_conf div_ant_conf;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
char *buf;
- char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
- "LNA2",
- "LNA1",
- "LNA1_PLUS_LNA2"};
+ static const char *lna_conf_str[4] = {
+ "LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@@ -716,10 +715,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
struct ath_softc *sc = file->private_data;
struct ath_txq *txq;
char *buf;
- unsigned int len = 0, size = 1024;
+ unsigned int len = 0;
+ const unsigned int size = 1024;
ssize_t retval = 0;
int i;
- char *qname[4] = {"VO", "VI", "BE", "BK"};
+ static const char *qname[4] = {
+ "VO", "VI", "BE", "BK"
+ };
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
@@ -866,6 +868,12 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
"%17s: %2d\n", "PLL RX Hang",
sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "MAC Hang",
+ sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "%17s: %2d\n", "Stuck Beacon",
+ sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%17s: %2d\n", "MCI Reset",
sc->debug.stats.reset[RESET_TYPE_MCI]);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cc7a025d833e..559a68c2709c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -18,7 +18,6 @@
#define DEBUG_H
#include "hw.h"
-#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 0a7ddf4c88c9..7936c9126a20 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -21,6 +21,8 @@
#include "hw.h"
+struct ath_softc;
+
/**
* struct ath_dfs_stats - DFS Statistics per wiphy
* @pulses_total: pulses reported by HW
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6d5d716adc1b..8e7153b186ed 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
+ { USB_DEVICE(0x0411, 0x0197),
+ .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 99a203174f45..dab1f0cab993 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -39,7 +39,6 @@
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_DEFAULT_BMISS_LIMIT 10
-#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define TSF_TO_TU(_h, _l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -277,7 +276,6 @@ struct ath9k_htc_rxbuf {
};
struct ath9k_htc_rx {
- int last_rssi; /* FIXME: per-STA */
struct list_head rxbuf;
spinlock_t rxbuflock;
};
@@ -407,12 +405,18 @@ static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
#define MIN_SWBA_RESPONSE 10 /* in TUs */
-struct htc_beacon_config {
+struct htc_beacon {
+ enum {
+ OK, /* no change needed */
+ UPDATE, /* update pending */
+ COMMIT /* beacon sent, commit change */
+ } updateslot; /* slot time update fsm */
+
struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
- u16 beacon_interval;
- u16 dtim_period;
- u16 bmiss_timeout;
- u32 bmiss_cnt;
+ u32 bmisscnt;
+ u32 beaconq;
+ int slottime;
+ int slotupdate;
};
struct ath_btcoex {
@@ -440,12 +444,8 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#define OP_INVALID BIT(0)
-#define OP_SCANNING BIT(1)
-#define OP_ENABLE_BEACON BIT(2)
#define OP_BT_PRIORITY_DETECTED BIT(3)
#define OP_BT_SCAN BIT(4)
-#define OP_ANI_RUNNING BIT(5)
#define OP_TSF_RESET BIT(6)
struct ath9k_htc_priv {
@@ -488,10 +488,10 @@ struct ath9k_htc_priv {
unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
spinlock_t beacon_lock;
- struct htc_beacon_config cur_beacon_conf;
+ struct ath_beacon_config cur_beacon_conf;
+ struct htc_beacon beacon;
struct ath9k_htc_rx rx;
struct ath9k_htc_tx tx;
@@ -516,7 +516,6 @@ struct ath9k_htc_priv {
struct work_struct led_work;
#endif
- int beaconq;
int cabq;
int hwq_map[IEEE80211_NUM_ACS];
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 8b5757734596..e8b6ec3c1dbb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -26,7 +26,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
- ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
+ ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
if (priv->ah->opmode == NL80211_IFTYPE_AP ||
priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
@@ -54,220 +54,78 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
}
- if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
+ if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
ath_err(ath9k_hw_common(ah),
- "Unable to update beacon queue %u!\n", priv->beaconq);
+ "Unable to update beacon queue %u!\n", priv->beacon.beaconq);
} else {
- ath9k_hw_resettxqueue(ah, priv->beaconq);
+ ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
}
}
-
-static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf,
+ bool reset_tsf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_beacon_state bs;
- enum ath9k_int imask = 0;
- int dtimperiod, dtimcount, sleepduration;
- int bmiss_timeout;
- u32 nexttbtt = 0, intval, tsftu;
- __be32 htc_imask = 0;
- u64 tsf;
- int num_beacons, offset, dtim_dec_count;
+ struct ath_hw *ah = priv->ah;
int ret __attribute__ ((unused));
+ __be32 htc_imask = 0;
u8 cmd_rsp;
- memset(&bs, 0, sizeof(bs));
-
- intval = bss_conf->beacon_interval;
- bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
-
- /*
- * Setup dtim parameters according to
- * last beacon we received (which may be none).
- */
- dtimperiod = bss_conf->dtim_period;
- if (dtimperiod <= 0) /* NB: 0 if not known */
- dtimperiod = 1;
- dtimcount = 1;
- if (dtimcount >= dtimperiod) /* NB: sanity check */
- dtimcount = 0;
-
- sleepduration = intval;
- if (sleepduration <= 0)
- sleepduration = intval;
-
- /*
- * Pull nexttbtt forward to reflect the current
- * TSF and calculate dtim state for the result.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
- num_beacons = tsftu / intval + 1;
- offset = tsftu % intval;
- nexttbtt = tsftu - offset;
- if (offset)
- nexttbtt += intval;
-
- /* DTIM Beacon every dtimperiod Beacon */
- dtim_dec_count = num_beacons % dtimperiod;
- dtimcount -= dtim_dec_count;
- if (dtimcount < 0)
- dtimcount += dtimperiod;
-
- bs.bs_intval = TU_TO_USEC(intval);
- bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
- bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
- bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
-
- /*
- * Calculate the number of consecutive beacons to miss* before taking
- * a BMISS interrupt. The configuration is specified in TU so we only
- * need calculate based on the beacon interval. Note that we clamp the
- * result to at most 15 beacons.
- */
- if (sleepduration > intval) {
- bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
- } else {
- bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
- if (bs.bs_bmissthreshold > 15)
- bs.bs_bmissthreshold = 15;
- else if (bs.bs_bmissthreshold <= 0)
- bs.bs_bmissthreshold = 1;
- }
-
- /*
- * Calculate sleep duration. The configuration is given in ms.
- * We ensure a multiple of the beacon period is used. Also, if the sleep
- * duration is greater than the DTIM period then it makes senses
- * to make it a multiple of that.
- *
- * XXX fixed at 100ms
- */
-
- bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
- sleepduration));
- if (bs.bs_sleepduration > bs.bs_dtimperiod)
- bs.bs_sleepduration = bs.bs_dtimperiod;
-
- /* TSF out of range threshold fixed at 1 second */
- bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
- ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
- intval, tsf, tsftu);
- ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration);
-
- /* Set the computed STA beacon timers */
+ if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
+ ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
+ else
+ ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
- imask |= ATH9K_INT_BMISS;
- htc_imask = cpu_to_be32(imask);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
+ ath9k_htc_beaconq_config(priv);
+ ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
+ priv->beacon.bmisscnt = 0;
+ htc_imask = cpu_to_be32(ah->imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
-static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *bss_conf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
enum ath9k_int imask = 0;
- u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret __attribute__ ((unused));
u8 cmd_rsp;
- u64 tsf;
- intval = bss_conf->beacon_interval;
- intval /= ATH9K_HTC_MAX_BCN_VIF;
- nexttbtt = intval;
-
- /*
- * To reduce beacon misses under heavy TX load,
- * set the beacon response time to a larger value.
- */
- if (intval > DEFAULT_SWBA_RESPONSE)
- priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
- else
- priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
-
- if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
- ath9k_hw_reset_tsf(priv->ah);
- clear_bit(OP_TSF_RESET, &priv->op_flags);
- } else {
- /*
- * Pull nexttbtt forward to reflect the current TSF.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- do {
- nexttbtt += intval;
- } while (nexttbtt < tsftu);
- }
-
- if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
- imask |= ATH9K_INT_SWBA;
-
- ath_dbg(common, CONFIG,
- "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
- bss_conf->beacon_interval, nexttbtt,
- priv->ah->config.sw_beacon_response_time, imask);
-
- ath9k_htc_beaconq_config(priv);
+ if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
+ return;
WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
- priv->cur_beacon_conf.bmiss_cnt = 0;
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
htc_imask = cpu_to_be32(imask);
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
-static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
- struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- enum ath9k_int imask = 0;
- u32 nexttbtt, intval, tsftu;
- __be32 htc_imask = 0;
- int ret __attribute__ ((unused));
- u8 cmd_rsp;
- u64 tsf;
-
- intval = bss_conf->beacon_interval;
- nexttbtt = intval;
-
- /*
- * Pull nexttbtt forward to reflect the current TSF.
- */
- tsf = ath9k_hw_gettsf64(priv->ah);
- tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
- do {
- nexttbtt += intval;
- } while (nexttbtt < tsftu);
-
- /*
- * Only one IBSS interfce is allowed.
- */
- if (intval > DEFAULT_SWBA_RESPONSE)
- priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
- else
- priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
- if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
- imask |= ATH9K_INT_SWBA;
+ ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
+ ath9k_htc_beacon_init(priv, conf, false);
+}
- ath_dbg(common, CONFIG,
- "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
- bss_conf->beacon_interval, nexttbtt,
- priv->ah->config.sw_beacon_response_time, imask);
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct ath_beacon_config *conf)
+{
+ struct ath_hw *ah = priv->ah;
+ ah->imask = 0;
- WMI_CMD(WMI_DISABLE_INTR_CMDID);
- ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
- priv->cur_beacon_conf.bmiss_cnt = 0;
- htc_imask = cpu_to_be32(imask);
- WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+ ath9k_cmn_beacon_config_adhoc(ah, conf);
+ ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
}
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
@@ -287,7 +145,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
- vif = priv->cur_beacon_conf.bslot[slot];
+ vif = priv->beacon.bslot[slot];
skb = ieee80211_get_buffered_bc(priv->hw, vif);
@@ -348,10 +206,10 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
- vif = priv->cur_beacon_conf.bslot[slot];
+ vif = priv->beacon.bslot[slot];
avp = (struct ath9k_htc_vif *)vif->drv_priv;
- if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
+ if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@@ -431,8 +289,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
int slot;
if (swba->beacon_pending != 0) {
- priv->cur_beacon_conf.bmiss_cnt++;
- if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
+ priv->beacon.bmisscnt++;
+ if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
@@ -440,16 +298,16 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
return;
}
- if (priv->cur_beacon_conf.bmiss_cnt) {
+ if (priv->beacon.bmisscnt) {
ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
- priv->cur_beacon_conf.bmiss_cnt);
- priv->cur_beacon_conf.bmiss_cnt = 0;
+ priv->beacon.bmisscnt);
+ priv->beacon.bmisscnt = 0;
}
slot = ath9k_htc_choose_bslot(priv, swba);
spin_lock_bh(&priv->beacon_lock);
- if (priv->cur_beacon_conf.bslot[slot] == NULL) {
+ if (priv->beacon.bslot[slot] == NULL) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
@@ -468,13 +326,13 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
spin_lock_bh(&priv->beacon_lock);
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
- if (priv->cur_beacon_conf.bslot[i] == NULL) {
+ if (priv->beacon.bslot[i] == NULL) {
avp->bslot = i;
break;
}
}
- priv->cur_beacon_conf.bslot[avp->bslot] = vif;
+ priv->beacon.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
@@ -488,7 +346,7 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
spin_lock_bh(&priv->beacon_lock);
- priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
+ priv->beacon.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
@@ -504,7 +362,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
u64 tsfadjust;
if (avp->bslot == 0)
@@ -536,7 +394,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
bool beacon_configured;
@@ -591,7 +449,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
@@ -627,7 +485,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
switch (priv->ah->opmode) {
case NL80211_IFTYPE_STATION:
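
The bslot[] churn earlier in this file is purely a rename (priv->cur_beacon_conf.bslot becomes priv->beacon.bslot); the bookkeeping itself is unchanged: scan for a free slot under the beacon lock when an interface is added, NULL it out on removal. A simplified userspace sketch of that pattern follows, using a pthread mutex where the driver uses a spinlock, an assumed slot count, and a variant that reports failure when the table is full.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_BCN_VIF 2 /* assumption; stands in for ATH9K_HTC_MAX_BCN_VIF */

static void *bslot[MAX_BCN_VIF];
static pthread_mutex_t beacon_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the slot index, or -1 if every slot is taken. */
static int assign_bslot(void *vif)
{
	int i, slot = -1;

	pthread_mutex_lock(&beacon_lock);
	for (i = 0; i < MAX_BCN_VIF; i++) {
		if (bslot[i] == NULL) {
			bslot[i] = vif;
			slot = i;
			break;
		}
	}
	pthread_mutex_unlock(&beacon_lock);
	return slot;
}

static void remove_bslot(int slot)
{
	pthread_mutex_lock(&beacon_lock);
	bslot[slot] = NULL;
	pthread_mutex_unlock(&beacon_lock);
}

int main(void)
{
	int vif;
	int slot = assign_bslot(&vif);

	printf("added interface at beacon slot %d\n", slot);
	remove_bslot(slot);
	return 0;
}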
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c57d6b859c04..8a3bd5fe3a54 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -38,93 +38,6 @@ static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-static struct ieee80211_channel ath9k_2ghz_channels[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-static struct ieee80211_channel ath9k_5ghz_channels[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short preamble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
{ .throughput = 0 * 1024, .blink_time = 334 },
@@ -343,6 +256,25 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
}
}
+static void ath9k_regwrite_multi(struct ath_common *common)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_dbg(common, WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+}
+
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -369,8 +301,6 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
- u32 rsp_status;
- int r;
mutex_lock(&priv->wmi->multi_write_mutex);
@@ -383,19 +313,8 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
priv->wmi->multi_write_idx++;
/* If the buffer is full, send it out. */
- if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
- r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
- (u8 *) &priv->wmi->multi_write,
- sizeof(struct register_write) * priv->wmi->multi_write_idx,
- (u8 *) &rsp_status, sizeof(rsp_status),
- 100);
- if (unlikely(r)) {
- ath_dbg(common, WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
- }
- priv->wmi->multi_write_idx = 0;
- }
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER)
+ ath9k_regwrite_multi(common);
mutex_unlock(&priv->wmi->multi_write_mutex);
}
@@ -426,26 +345,13 @@ static void ath9k_regwrite_flush(void *hw_priv)
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
- u32 rsp_status;
- int r;
atomic_dec(&priv->wmi->mwrite_cnt);
mutex_lock(&priv->wmi->multi_write_mutex);
- if (priv->wmi->multi_write_idx) {
- r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
- (u8 *) &priv->wmi->multi_write,
- sizeof(struct register_write) * priv->wmi->multi_write_idx,
- (u8 *) &rsp_status, sizeof(rsp_status),
- 100);
- if (unlikely(r)) {
- ath_dbg(common, WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
- }
- priv->wmi->multi_write_idx = 0;
- }
+ if (priv->wmi->multi_write_idx)
+ ath9k_regwrite_multi(common);
mutex_unlock(&priv->wmi->multi_write_mutex);
}
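
The two hunks above deduplicate the WMI flush path: both the buffer-full case and the explicit flush now call the new ath9k_regwrite_multi() helper. The underlying pattern is a small write-combining buffer that is sent as one command when it fills up or when the caller flushes. A hedged userspace sketch of that pattern, with the transport reduced to a callback and the buffer size chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

#define MAX_CMD_NUMBER 8 /* assumption; the driver uses its own limit */

struct reg_write { uint32_t reg; uint32_t val; };

struct write_buf {
	struct reg_write cmds[MAX_CMD_NUMBER];
	int idx;
	/* Transport hook; the driver issues a WMI_REG_WRITE_CMDID here. */
	void (*send)(const struct reg_write *cmds, int n);
};

static void flush_writes(struct write_buf *wb)
{
	if (!wb->idx)
		return;
	wb->send(wb->cmds, wb->idx);
	wb->idx = 0;
}

static void buffer_write(struct write_buf *wb, uint32_t reg, uint32_t val)
{
	wb->cmds[wb->idx].reg = reg;
	wb->cmds[wb->idx].val = val;
	wb->idx++;

	/* If the buffer is full, send it out (as in ath9k_regwrite_buffer). */
	if (wb->idx == MAX_CMD_NUMBER)
		flush_writes(wb);
}

static void send_stub(const struct reg_write *cmds, int n)
{
	(void)cmds;
	printf("sending %d buffered register writes\n", n);
}

int main(void)
{
	struct write_buf wb = { .idx = 0, .send = send_stub };
	int i;

	for (i = 0; i < 10; i++)
		buffer_write(&wb, 0x1000 + 4 * i, i);
	flush_writes(&wb); /* mirrors ath9k_regwrite_flush() */
	return 0;
}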
@@ -491,51 +397,6 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
.eeprom_read = ath_usb_eeprom_read,
};
-static void setup_ht_cap(struct ath9k_htc_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(priv->ah);
- u8 tx_streams, rx_streams;
- int i;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- /* ath9k_htc supports only 1 or 2 stream devices */
- tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
- rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
-
- ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
-
- if (tx_streams >= 2)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
-
- if (tx_streams != rx_streams) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- for (i = 0; i < rx_streams; i++)
- ht_info->mcs.rx_mask[i] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
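
The removed setup_ht_cap() above (and its twin removed from init.c further down) derived the advertised HT capabilities from the TX/RX chainmasks: one 0xff byte in the MCS RX mask per receive stream, plus a TX_RX_DIFF hint when the stream counts differ. That work is now done by ath9k_cmn_reload_chainmask(). Here is a simplified sketch of just the MCS-mask logic; the struct and flag constants below are local stand-ins, not mac80211's ieee80211_sta_ht_cap.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down stand-in for the mac80211 MCS info (assumption, not the real struct). */
struct mcs_info {
	uint8_t rx_mask[10];
	uint8_t tx_params;
};

#define MCS_TX_DEFINED			0x01
#define MCS_TX_RX_DIFF			0x02
#define MCS_TX_MAX_STREAMS_SHIFT	2

static void setup_mcs(struct mcs_info *mcs, int tx_streams, int rx_streams)
{
	int i;

	memset(mcs, 0, sizeof(*mcs));

	/* Advertise every MCS index of each supported RX stream. */
	for (i = 0; i < rx_streams; i++)
		mcs->rx_mask[i] = 0xff;

	mcs->tx_params |= MCS_TX_DEFINED;

	/* Flag asymmetric TX/RX stream counts, as the removed helper did. */
	if (tx_streams != rx_streams) {
		mcs->tx_params |= MCS_TX_RX_DIFF;
		mcs->tx_params |= (tx_streams - 1) << MCS_TX_MAX_STREAMS_SHIFT;
	}
}

int main(void)
{
	struct mcs_info mcs;

	setup_mcs(&mcs, 1, 2);
	printf("rx_mask[0]=0x%02x rx_mask[1]=0x%02x tx_params=0x%02x\n",
	       mcs.rx_mask[0], mcs.rx_mask[1], mcs.tx_params);
	return 0;
}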
-
static int ath9k_init_queues(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -544,8 +405,8 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
priv->hwq_map[i] = -1;
- priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
- if (priv->beaconq == -1) {
+ priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
+ if (priv->beacon.beaconq == -1) {
ath_err(common, "Unable to setup BEACON xmit queue\n");
goto err;
}
@@ -580,37 +441,13 @@ err:
return -EINVAL;
}
-static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
-{
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- priv->sbands[IEEE80211_BAND_2GHZ].channels =
- ath9k_2ghz_channels;
- priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_channels);
- priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
- priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_channels);
- priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-}
-
static void ath9k_init_misc(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
@@ -622,12 +459,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
struct ath_common *common;
int i, ret = 0, csz = 0;
- set_bit(OP_INVALID, &priv->op_flags);
-
ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
return -ENOMEM;
+ ah->dev = priv->dev;
ah->hw_version.devid = devid;
ah->hw_version.usbdev = drv_info;
ah->ah_flags |= AH_USE_EEPROM;
@@ -647,6 +483,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common->priv = priv;
common->debug_mask = ath9k_debug;
common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
+ set_bit(ATH_OP_INVALID, &common->op_flags);
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx.tx_lock);
@@ -682,10 +519,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
goto err_queues;
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
- priv->cur_beacon_conf.bslot[i] = NULL;
+ priv->beacon.bslot[i] = NULL;
+ priv->beacon.slottime = ATH9K_SLOT_TIME_9;
+ ath9k_cmn_init_channels_rates(common);
ath9k_cmn_init_crypto(ah);
- ath9k_init_channels_rates(priv);
ath9k_init_misc(priv);
ath9k_htc_init_btcoex(priv, product);
@@ -721,6 +559,7 @@ static const struct ieee80211_iface_combination if_comb = {
static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw)
{
+ struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct base_eep_header *pBase;
@@ -765,19 +604,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->sbands[IEEE80211_BAND_2GHZ];
+ &common->sbands[IEEE80211_BAND_2GHZ];
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->sbands[IEEE80211_BAND_5GHZ];
-
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- setup_ht_cap(priv,
- &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- setup_ht_cap(priv,
- &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
+ &common->sbands[IEEE80211_BAND_5GHZ];
+
+ ath9k_cmn_reload_chainmask(ah);
pBase = ath9k_htc_get_eeprom_base(priv);
if (pBase) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c9254a61ca52..f46cd0250e48 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
int ret;
- if (test_bit(OP_INVALID, &priv->op_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +304,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
- if (!test_bit(OP_SCANNING, &priv->op_flags) &&
+ if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
ath9k_htc_vif_reconfig(priv);
@@ -748,7 +748,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- set_bit(OP_ANI_RUNNING, &priv->op_flags);
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -756,8 +756,9 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
cancel_delayed_work_sync(&priv->ani_work);
- clear_bit(OP_ANI_RUNNING, &priv->op_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
}
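
A recurring change throughout this series is that the per-driver flag words (priv->op_flags in ath9k_htc, sc->sc_flags in ath9k) are replaced by a single op_flags field in the shared ath_common, still driven by the kernel's atomic set_bit()/clear_bit()/test_bit() helpers so both drivers and the common code can test the same state. A rough userspace analogue of that pattern, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { ATH_OP_INVALID, ATH_OP_SCANNING, ATH_OP_ANI_RUN }; /* bit numbers */

static atomic_ulong op_flags = 0;

static void op_set(int bit)   { atomic_fetch_or(&op_flags, 1UL << bit); }
static void op_clear(int bit) { atomic_fetch_and(&op_flags, ~(1UL << bit)); }
static bool op_test(int bit)  { return atomic_load(&op_flags) & (1UL << bit); }

int main(void)
{
	op_set(ATH_OP_INVALID);		/* device not started yet */
	op_clear(ATH_OP_INVALID);	/* cleared in the start() path */
	op_set(ATH_OP_SCANNING);
	printf("scanning: %d\n", op_test(ATH_OP_SCANNING));
	return 0;
}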
void ath9k_htc_ani_work(struct work_struct *work)
@@ -942,7 +943,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
- clear_bit(OP_INVALID, &priv->op_flags);
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
@@ -971,7 +972,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (test_bit(OP_INVALID, &priv->op_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
@@ -1013,7 +1014,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
- set_bit(OP_INVALID, &priv->op_flags);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
@@ -1087,7 +1088,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
- !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
ath9k_hw_set_tsfadjust(priv->ah, true);
ath9k_htc_start_ani(priv);
}
@@ -1245,13 +1246,14 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
u32 rfilt;
mutex_lock(&priv->mutex);
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
- if (test_bit(OP_INVALID, &priv->op_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
@@ -1474,7 +1476,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
common->curaid = bss_conf->aid;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
}
}
@@ -1496,6 +1500,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
+ int slottime;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
@@ -1507,6 +1512,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->assoc ?
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
+ if (!bss_conf->assoc)
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_htc_choose_set_bssid(priv);
if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
@@ -1528,7 +1536,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
- set_bit(OP_ENABLE_BEACON, &priv->op_flags);
+ priv->cur_beacon_conf.enable_beacon = 1;
ath9k_htc_beacon_config(priv, vif);
}
@@ -1542,7 +1550,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
- clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
+ priv->cur_beacon_conf.enable_beacon = 0;
ath9k_htc_beacon_config(priv, vif);
}
}
@@ -1568,11 +1576,21 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ah->slottime = 9;
+ slottime = 9;
else
- ah->slottime = 20;
-
- ath9k_hw_init_global_settings(ah);
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ priv->beacon.slottime = slottime;
+ priv->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
}
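
The ERP slot-time hunk above no longer pokes the hardware immediately when operating as an AP: the new value is parked in priv->beacon.slottime with updateslot = UPDATE, so the change can be applied from the beacon path and associated stations get a chance to switch at the same time. A minimal sketch of that defer-or-apply decision; the enum and the apply hook below are assumptions standing in for the driver's types.

#include <stdbool.h>
#include <stdio.h>

enum slot_update { COMMIT, UPDATE };	/* stand-in for the driver enum */

struct beacon_state {
	int slottime;			/* in microseconds */
	enum slot_update updateslot;
};

static void apply_slottime(int slottime)
{
	/* The driver would set ah->slottime and reprogram global settings here. */
	printf("slot time applied immediately: %d us\n", slottime);
}

static void set_slottime(struct beacon_state *beacon, bool is_ap,
			 bool use_short_slot)
{
	int slottime = use_short_slot ? 9 : 20;

	if (is_ap) {
		/* Defer: the beacon code applies it on a later SWBA. */
		beacon->slottime = slottime;
		beacon->updateslot = UPDATE;
	} else {
		apply_slottime(slottime);
	}
}

int main(void)
{
	struct beacon_state beacon = { .slottime = 9, .updateslot = COMMIT };

	set_slottime(&beacon, true, false);	/* AP: deferred, 20 us */
	set_slottime(&beacon, false, true);	/* STA: applied right away */
	return 0;
}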
if (changed & BSS_CHANGED_HT)
@@ -1669,10 +1687,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- set_bit(OP_SCANNING, &priv->op_flags);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
ath9k_htc_stop_ani(priv);
@@ -1682,10 +1701,11 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- clear_bit(OP_SCANNING, &priv->op_flags);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
spin_unlock_bh(&priv->beacon_lock);
ath9k_htc_ps_wakeup(priv);
ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 12e0f32a4905..e8149e3dbdd5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -924,46 +924,43 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
- priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+ ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
}
-static void ath9k_process_rate(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *rxs,
- u8 rx_rate, u8 rs_flags)
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
-
- if (rx_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- if (rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
- rxs->rate_idx = rx_rate & 0x7f;
- return;
- }
-
- band = hw->conf.chandef.chan->band;
- sband = hw->wiphy->bands[band];
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_rate) {
- rxs->rate_idx = i;
- return;
- }
- if (sband->bitrates[i].hw_value_short == rx_rate) {
- rxs->rate_idx = i;
- rxs->flag |= RX_FLAG_SHORTPRE;
- return;
- }
- }
+ rx_stats->flag = 0;
+ if (rxstatus->rs_flags & ATH9K_RX_2040)
+ rx_stats->flag |= RX_FLAG_40MHZ;
+ if (rxstatus->rs_flags & ATH9K_RX_GI)
+ rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_status = rxstatus->rs_status;
+ rx_stats->rs_phyerr = rxstatus->rs_phyerr;
+ rx_stats->rs_rssi = rxstatus->rs_rssi;
+ rx_stats->rs_keyix = rxstatus->rs_keyix;
+ rx_stats->rs_rate = rxstatus->rs_rate;
+ rx_stats->rs_antenna = rxstatus->rs_antenna;
+ rx_stats->rs_more = rxstatus->rs_more;
+
+ memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+ sizeof(rx_stats->rs_rssi_ctl));
+ memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+ sizeof(rx_stats->rs_rssi_ext));
+
+ rx_stats->rs_isaggr = rxstatus->rs_isaggr;
+ rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
+ rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+ convert_htc_flag(rx_stats, rxstatus);
}
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +972,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb = rxbuf->skb;
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
- int hdrlen, padsize;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
- __le16 fc;
+ struct ath_rx_status rx_stats;
+ bool decrypt_error;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +996,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_htc_err_stat_rx(priv, rxstatus);
/* Get the RX status information */
- memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
- skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-
- padsize = hdrlen & 3;
- if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rxbuf->rxstatus.rs_status != 0) {
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
- goto rx_next;
-
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
- /* FIXME */
- } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
- else
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- goto rx_next;
- } else {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- goto rx_next;
- }
- }
- }
-
- if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
- u8 keyix;
- keyix = rxbuf->rxstatus.rs_keyix;
- if (keyix != ATH9K_RXKEYIX_INVALID) {
- rx_status->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc) &&
- skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
- if (test_bit(keyix, common->keymap))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- }
- }
-
- ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
- rxbuf->rxstatus.rs_flags);
-
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
-
- last_rssi = priv->rx.last_rssi;
+ /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+ * After this, we can drop this part of skb. */
+ rx_status_htc_to_ath(&rx_stats, rxstatus);
+ rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
- if (ath_is_mybeacon(common, hdr)) {
- s8 rssi = rxbuf->rxstatus.rs_rssi;
+ /*
+ * everything but the rate is checked here, the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+ &decrypt_error, priv->rxfilter))
+ goto rx_next;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+ rx_status, decrypt_error);
- if (rssi < 0)
- rssi = 0;
+ if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+ goto rx_next;
- priv->ah->stats.avgbrssi = rssi;
- }
+ rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+ ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
- rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
- rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats.rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
-
rx_next:
return false;
}
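
ath9k_process_rate() is dropped above in favour of the shared ath9k_cmn_process_rate(), but the decode it performed is worth keeping in mind when reading the RX path: a hardware rate code with bit 7 set is an HT MCS index (low 7 bits); anything else is matched against the legacy bitrate table, with the short-preamble code mapped back to the same index. A self-contained sketch of that lookup, using an assumed excerpt of the CCK entries from the legacy table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct legacy_rate {
	int bitrate_100kbps;
	uint8_t hw_value;
	uint8_t hw_value_short;	/* 0 if no short-preamble variant */
};

/* Assumed excerpt of the legacy table (1, 2, 5.5, 11 Mb/s CCK rates). */
static const struct legacy_rate rates[] = {
	{ 10,  0x1b, 0x00 },
	{ 20,  0x1a, 0x1e },
	{ 55,  0x19, 0x1d },
	{ 110, 0x18, 0x1c },
};

/* Decode a hardware rate code into (is_ht, index, short_preamble). */
static bool decode_rate(uint8_t rx_rate, bool *is_ht, int *idx, bool *shortpre)
{
	unsigned int i;

	*is_ht = false;
	*shortpre = false;

	if (rx_rate & 0x80) {		/* HT rate: MCS index in the low bits */
		*is_ht = true;
		*idx = rx_rate & 0x7f;
		return true;
	}

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (rates[i].hw_value == rx_rate) {
			*idx = i;
			return true;
		}
		/* Entries without a short-preamble variant are skipped. */
		if (rates[i].hw_value_short &&
		    rates[i].hw_value_short == rx_rate) {
			*idx = i;
			*shortpre = true;
			return true;
		}
	}
	return false;			/* unknown rate code */
}

int main(void)
{
	bool ht, sp;
	int idx;

	if (decode_rate(0x1d, &ht, &idx, &sp))
		printf("ht=%d idx=%d short_preamble=%d\n", ht, idx, sp);
	return 0;
}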
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index aac4a406a513..a0ff5b637054 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,6 +358,36 @@ ret:
kfree_skb(skb);
}
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+ struct sk_buff *skb)
+{
+ uint32_t *pattern = (uint32_t *)skb->data;
+
+ switch (*pattern) {
+ case 0x33221199:
+ {
+ struct htc_panic_bad_vaddr *htc_panic;
+ htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+ htc_panic->exccause, htc_panic->pc,
+ htc_panic->badvaddr);
+ break;
+ }
+ case 0x33221299:
+ {
+ struct htc_panic_bad_epid *htc_panic;
+ htc_panic = (struct htc_panic_bad_epid *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "bad epid: 0x%08x\n", htc_panic->epid);
+ break;
+ }
+ default:
+		dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
+ break;
+ }
+}
+
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
+ if (epid == 0x99) {
+ ath9k_htc_fw_panic_report(htc_handle, skb);
+ kfree_skb(skb);
+ return;
+ }
+
if (epid >= ENDPOINT_MAX) {
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index e1ffbb6bd636..06474ccc7696 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
+struct htc_panic_bad_vaddr {
+ __be32 pattern;
+ __be32 exccause;
+ __be32 pc;
+ __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+ __be32 pattern;
+ __be32 epid;
+} __packed;
+
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
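
The new ath9k_htc_fw_panic_report() path above recognises messages arriving on the reserved endpoint id 0x99 and dispatches on a 32-bit pattern word: 0x33221199 carries an exception-cause/PC/bad-vaddr record, 0x33221299 a bad-endpoint record. A userspace sketch of that dispatch follows; the driver's fields are __be32 and are printed raw, whereas this sketch keeps everything in host order for simplicity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified host-order views of the panic records (the driver declares
 * these fields as __be32). */
struct panic_bad_vaddr { uint32_t pattern, exccause, pc, badvaddr; };
struct panic_bad_epid  { uint32_t pattern, epid; };

static void report_panic(const void *buf, size_t len)
{
	uint32_t pattern;

	if (len < sizeof(pattern))
		return;
	memcpy(&pattern, buf, sizeof(pattern));

	switch (pattern) {
	case 0x33221199: {
		struct panic_bad_vaddr p;

		if (len < sizeof(p))
			return;
		memcpy(&p, buf, sizeof(p));
		printf("firmware panic! exccause: 0x%08x pc: 0x%08x badvaddr: 0x%08x\n",
		       p.exccause, p.pc, p.badvaddr);
		break;
	}
	case 0x33221299: {
		struct panic_bad_epid p;

		if (len < sizeof(p))
			return;
		memcpy(&p, buf, sizeof(p));
		printf("firmware panic! bad epid: 0x%08x\n", p.epid);
		break;
	}
	default:
		printf("unknown panic pattern 0x%08x\n", pattern);
	}
}

int main(void)
{
	struct panic_bad_epid msg = { 0x33221299, 7 };

	report_panic(&msg, sizeof(msg));
	return 0;
}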
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9078a6c5a74e..c8a9dfab1fee 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,7 +23,6 @@
#include "hw.h"
#include "hw-ops.h"
-#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
@@ -883,7 +882,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -3048,6 +3047,7 @@ static struct {
{ AR_SREV_VERSION_9462, "9462" },
{ AR_SREV_VERSION_9550, "9550" },
{ AR_SREV_VERSION_9565, "9565" },
+ { AR_SREV_VERSION_9531, "9531" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1fc2e5a26b52..c0a4e866edca 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -62,111 +62,6 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
bool is_ath9k_unloaded;
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short preamble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
- RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
- IEEE80211_RATE_SUPPORTS_10MHZ)),
-};
#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
@@ -258,64 +153,6 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
/* Initialization */
/**************************/
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- u8 tx_streams, rx_streams;
- int i, max_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
- ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
- max_streams = 1;
- else if (AR_SREV_9462(ah))
- max_streams = 2;
- else if (AR_SREV_9300_20_OR_LATER(ah))
- max_streams = 3;
- else
- max_streams = 2;
-
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- if (max_streams >= 2)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
- }
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
- rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
-
- ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
-
- if (tx_streams != rx_streams) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- for (i = 0; i < rx_streams; i++)
- ht_info->mcs.rx_mask[i] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
@@ -486,51 +323,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
return 0;
}
-static int ath9k_init_channels_rates(struct ath_softc *sc)
-{
- void *channels;
-
- BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
- ARRAY_SIZE(ath9k_5ghz_chantable) !=
- ATH9K_NUM_CHANNELS);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- channels = devm_kzalloc(sc->dev,
- sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- memcpy(channels, ath9k_2ghz_chantable,
- sizeof(ath9k_2ghz_chantable));
- sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- channels = devm_kzalloc(sc->dev,
- sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- memcpy(channels, ath9k_5ghz_chantable,
- sizeof(ath9k_5ghz_chantable));
- sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
- return 0;
-}
-
static void ath9k_init_misc(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -538,7 +330,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -793,7 +585,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
- ret = ath9k_init_channels_rates(sc);
+ ret = ath9k_cmn_init_channels_rates(common);
if (ret)
goto err_btcoex;
@@ -823,10 +615,11 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct cfg80211_chan_def chandef;
int i;
- sband = &sc->sbands[band];
+ sband = &common->sbands[band];
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
@@ -849,17 +642,6 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
ah->curchan = curchan;
}
-void ath9k_reload_chainmask_settings(struct ath_softc *sc)
-{
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
- return;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
-}
-
static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -949,6 +731,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->queues = 4;
hw->max_rates = 4;
@@ -969,13 +752,13 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
+ &common->sbands[IEEE80211_BAND_2GHZ];
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
+ &common->sbands[IEEE80211_BAND_5GHZ];
ath9k_init_wow(hw);
- ath9k_reload_chainmask_settings(sc);
+ ath9k_cmn_reload_chainmask(ah);
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
@@ -1106,19 +889,11 @@ static int __init ath9k_init(void)
{
int error;
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- pr_err("Unable to register rate control algorithm: %d\n",
- error);
- goto err_out;
- }
-
error = ath_pci_init();
if (error < 0) {
pr_err("No PCI devices found, driver not installed\n");
error = -ENODEV;
- goto err_rate_unregister;
+ goto err_out;
}
error = ath_ahb_init();
@@ -1131,9 +906,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
-
- err_rate_unregister:
- ath_rate_control_unregister();
err_out:
return error;
}
@@ -1144,7 +916,6 @@ static void __exit ath9k_exit(void)
is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
- ath_rate_control_unregister();
pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
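
With the in-driver rate control gone, ath9k_init() above unwinds only through the err_pci_exit/err_out labels. The goto-based staged cleanup it uses is the standard kernel idiom for multi-step module init; a compact userspace sketch of the shape, with the init steps reduced to stubs (the second one deliberately fails):

#include <stdio.h>

static int pci_init(void)  { return 0; }	/* stand-ins for the real steps */
static void pci_exit(void) { }
static int ahb_init(void)  { return -1; }	/* pretend the second step fails */

static int driver_init(void)
{
	int error;

	error = pci_init();
	if (error < 0)
		goto err_out;

	error = ahb_init();
	if (error < 0)
		goto err_pci_exit;

	return 0;

 err_pci_exit:
	pci_exit();				/* undo the step that succeeded */
 err_out:
	return error;
}

int main(void)
{
	printf("driver_init() = %d\n", driver_init());
	return 0;
}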
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 30dcef5aba10..72a715fe8f24 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -115,13 +115,14 @@ void ath_hw_pll_work(struct work_struct *work)
u32 pll_sqsum;
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_pll_work.work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
/*
* ensure that the PLL WAR is executed only
* after the STA is associated (or) if the
* beaconing had started in interfaces that
* uses beacons.
*/
- if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
return;
if (sc->tx99_state)
@@ -414,7 +415,7 @@ void ath_start_ani(struct ath_softc *sc)
unsigned long timestamp = jiffies_to_msecs(jiffies);
if (common->disable_ani ||
- !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
+ !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
return;
@@ -438,6 +439,7 @@ void ath_stop_ani(struct ath_softc *sc)
void ath_check_ani(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
/*
@@ -453,23 +455,23 @@ void ath_check_ani(struct ath_softc *sc)
* Disable ANI only when there are no
* associated stations.
*/
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
} else if (ah->opmode == NL80211_IFTYPE_STATION) {
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
goto stop_ani;
}
- if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
- set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
+ set_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_start_ani(sc);
}
return;
stop_ani:
- clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
ath_stop_ani(sc);
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 5f727588ca27..51ce36f108f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -827,7 +827,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
- if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 10271373a0cd..89df634e81f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
u8 rs_status;
u8 rs_phyerr;
int8_t rs_rssi;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_keyix;
u8 rs_rate;
u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
u8 rs_num_delims;
u8 rs_flags;
u8 rs_dummy;
+ /* FIXME: evm* never used? */
__be32 evm0;
__be32 evm1;
__be32 evm2;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5924f72dd493..d69853b848ce 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -229,16 +229,16 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
+ clear_bit(ATH_OP_HW_RESET, &common->op_flags);
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
- if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
goto work;
if (ah->opmode == NL80211_IFTYPE_STATION &&
- test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -336,7 +336,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
int old_pos = -1;
int r;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return -EIO;
offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -402,7 +402,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
chan->center_freq);
} else {
/* perform spectral scan if requested. */
- if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+ if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
sc->spectral_mode == SPECTRAL_CHANSCAN)
ath9k_spectral_scan_trigger(hw);
}
@@ -451,7 +451,7 @@ void ath9k_tasklet(unsigned long data)
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
+ ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
goto out;
}
@@ -471,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
* interrupts are enabled in the reset routine.
*/
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY,
+ ath_dbg(common, RESET,
"BB_WATCHDOG: Skipping interrupts\n");
goto out;
}
@@ -484,7 +484,7 @@ void ath9k_tasklet(unsigned long data)
type = RESET_TYPE_TX_GTT;
ath9k_queue_reset(sc, type);
atomic_inc(&ah->intr_ref_cnt);
- ath_dbg(common, ANY,
+ ath_dbg(common, RESET,
"GTT: Skipping interrupts\n");
goto out;
}
@@ -566,6 +566,7 @@ irqreturn_t ath_isr(int irq, void *dev)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
enum ath9k_int status;
u32 sync_cause = 0;
bool sched = false;
@@ -575,7 +576,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
/* shared irq, not for us */
@@ -583,7 +584,7 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
ath9k_hw_kill_interrupts(ah);
return IRQ_HANDLED;
}
@@ -684,10 +685,11 @@ int ath_reset(struct ath_softc *sc)
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
#ifdef CONFIG_ATH9K_DEBUGFS
RESET_STAT_INC(sc, type);
#endif
- set_bit(SC_OP_HW_RESET, &sc->sc_flags);
+ set_bit(ATH_OP_HW_RESET, &common->op_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -768,7 +770,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath_mci_enable(sc);
- clear_bit(SC_OP_INVALID, &sc->sc_flags);
+ clear_bit(ATH_OP_INVALID, &common->op_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false))
@@ -885,7 +887,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_cancel_work(sc);
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -940,7 +942,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- set_bit(SC_OP_INVALID, &sc->sc_flags);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1081,7 +1083,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
*/
if (ah->opmode == NL80211_IFTYPE_STATION &&
old_opmode == NL80211_IFTYPE_AP &&
- test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_sta_vif_iter, sc);
@@ -1178,9 +1180,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
- if (sc->csa_vif == vif)
- sc->csa_vif = NULL;
-
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@@ -1593,7 +1592,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
unsigned long flags;
- set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = true;
/*
@@ -1609,7 +1608,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1628,8 +1627,9 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_softc *sc = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
return;
if (bss_conf->assoc)
@@ -1660,18 +1660,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid, bss_conf->assoc);
if (avp->primary_sta_vif && !bss_conf->assoc) {
- clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
avp->primary_sta_vif = false;
if (ah->opmode == NL80211_IFTYPE_STATION)
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
}
ieee80211_iterate_active_interfaces_atomic(
sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath9k_bss_assoc_iter, sc);
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
ah->opmode == NL80211_IFTYPE_STATION) {
memset(common->curbssid, 0, ETH_ALEN);
common->curaid = 0;
@@ -1866,7 +1866,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
- int i, npend;
+ int i, npend = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -1900,7 +1900,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
return;
}
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -2056,7 +2056,7 @@ static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
- ath9k_reload_chainmask_settings(sc);
+ ath9k_cmn_reload_chainmask(ah);
return 0;
}
@@ -2073,26 +2073,23 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- set_bit(SC_OP_SCANNING, &sc->sc_flags);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ set_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
- clear_bit(SC_OP_SCANNING, &sc->sc_flags);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct ath_softc *sc = hw->priv;
-
- /* mac80211 does not support CSA in multi-if cases (yet) */
- if (WARN_ON(sc->csa_vif))
- return;
-
- sc->csa_vif = vif;
+ /* depend on vif->csa_active only */
+ return;
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 71799fcade54..a0dbcc412384 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -555,7 +555,7 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
while (more_data == MCI_GPM_MORE) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return;
pgpm = mci->gpm_buf.bf_addr;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 55724b02316b..25304adece57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,6 +784,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ath_softc *sc;
struct ieee80211_hw *hw;
+ struct ath_common *common;
u8 csz;
u32 val;
int ret = 0;
@@ -858,9 +859,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = pcim_iomap_table(pdev)[0];
sc->driver_data = id->driver_data;
- /* Will be cleared in ath9k_start() */
- set_bit(SC_OP_INVALID, &sc->sc_flags);
-
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
@@ -879,6 +877,10 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
+ /* Will be cleared in ath9k_start() */
+ common = ath9k_hw_common(sc->sc_ah);
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644
index d829bb62a3fc..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2011 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "ath9k.h"
-
-static const struct ath_rate_table ar5416_11na_ratetable = {
- 68,
- 8, /* MCS start */
- {
- [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
- 5400, 0, 12 }, /* 6 Mb */
- [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
- 7800, 1, 18 }, /* 9 Mb */
- [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10000, 2, 24 }, /* 12 Mb */
- [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 13900, 3, 36 }, /* 18 Mb */
- [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17300, 4, 48 }, /* 24 Mb */
- [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23000, 5, 72 }, /* 36 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 6, 96 }, /* 48 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 29300, 7, 108 }, /* 54 Mb */
- [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb */
- [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 75 Mb */
- [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb*/
- [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb*/
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb*/
- [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb*/
- [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb*/
- [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb*/
- [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-/* 4ms frame limit not used for NG mode. The values filled
- * for HT are the 64K max aggregate limit */
-
-static const struct ath_rate_table ar5416_11ng_ratetable = {
- 72,
- 12, /* MCS start */
- {
- [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
- 900, 0, 2 }, /* 1 Mb */
- [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
- 1900, 1, 4 }, /* 2 Mb */
- [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
- 4900, 2, 11 }, /* 5.5 Mb */
- [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
- 8100, 3, 22 }, /* 11 Mb */
- [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
- 5400, 4, 12 }, /* 6 Mb */
- [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
- 7800, 5, 18 }, /* 9 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10100, 6, 24 }, /* 12 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 14100, 7, 36 }, /* 18 Mb */
- [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17700, 8, 48 }, /* 24 Mb */
- [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23700, 9, 72 }, /* 36 Mb */
- [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 10, 96 }, /* 48 Mb */
- [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 30900, 11, 108 }, /* 54 Mb */
- [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb*/
- [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 65 Mb*/
- [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb */
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb */
- [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb */
- [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb */
- [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb */
- [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb */
- [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11a_ratetable = {
- 8,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 0, 12},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 1, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 2, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 3, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 4, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 5, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 6, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 7, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11g_ratetable = {
- 12,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
- 900, 0, 2},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
- 1900, 1, 4},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
- 4900, 2, 11},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
- 8100, 3, 22},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 4, 12},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 5, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 6, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 7, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 8, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 9, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 10, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 11, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_rate *rate)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int rix, i, idx = 0;
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- return rate->idx;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
- idx = ath_rc_priv->valid_rate_index[i];
-
- if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
- rate_table->info[idx].ratecode == rate->idx)
- break;
- }
-
- rix = idx;
-
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix++;
-
- return rix;
-}
-
-static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, j, idx, idx_next;
-
- for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
- for (j = 0; j <= i-1; j++) {
- idx = ath_rc_priv->valid_rate_index[j];
- idx_next = ath_rc_priv->valid_rate_index[j+1];
-
- if (rate_table->info[idx].ratekbps >
- rate_table->info[idx_next].ratekbps) {
- ath_rc_priv->valid_rate_index[j] = idx_next;
- ath_rc_priv->valid_rate_index[j+1] = idx;
- }
- }
- }
-}
-
-static inline
-int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate,
- u8 *next_idx)
-{
- u8 i;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i+1];
- return 1;
- }
- }
-
- /* No more valid rates */
- *next_idx = 0;
-
- return 0;
-}
-
-/* Return true only for single stream */
-
-static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
-{
- if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
- return 0;
- if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
- return 0;
- if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
- return 0;
- if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
- return 0;
- if (!ignore_cw && WLAN_RC_PHY_HT(phy))
- if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
- return 0;
- return 1;
-}
-
-static inline int
-ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate, u8 *next_idx)
-{
- int8_t i;
-
- for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i-1];
- return 1;
- }
- }
-
- return 0;
-}
-
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, hi = 0;
-
- for (i = 0; i < rate_table->rate_cnt; i++) {
- if (rate_table->info[i].rate_flags & RC_LEGACY) {
- u32 phy = rate_table->info[i].phy;
- u8 valid_rate_count = 0;
-
- if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[i] = true;
- hi = i;
- }
- }
-
- return hi;
-}
-
-static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
- return false;
-
- if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
- return false;
-
- if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset;
- u32 phy, capflag = ath_rc_priv->ht_cap;
- u16 rate_flags;
- u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
-
- if (legacy)
- rateset = &ath_rc_priv->neg_rates;
- else
- rateset = &ath_rc_priv->neg_ht_rates;
-
- for (i = 0; i < rateset->rs_nrates; i++) {
- for (j = 0; j < rate_table->rate_cnt; j++) {
- phy = rate_table->info[j].phy;
- rate_flags = rate_table->info[j].rate_flags;
- rate = rateset->rs_rates[i];
- dot11rate = rate_table->info[j].dot11rate;
-
- if (legacy &&
- !ath_rc_check_legacy(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!legacy &&
- !ath_rc_check_ht(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[j] = true;
- hi = max(hi, j);
- }
- }
-
- return hi;
-}
-
-static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
- int *is_probing)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u32 best_thruput, this_thruput, now_msec;
- u8 rate, next_rate, best_rate, maxindex, minindex;
- int8_t index = 0;
-
- now_msec = jiffies_to_msecs(jiffies);
- *is_probing = 0;
- best_thruput = 0;
- maxindex = ath_rc_priv->max_valid_rate-1;
- minindex = 0;
- best_rate = minindex;
-
- /*
- * Try the higher rate first. It will reduce memory moving time
- * if we have very good channel characteristics.
- */
- for (index = maxindex; index >= minindex ; index--) {
- u8 per_thres;
-
- rate = ath_rc_priv->valid_rate_index[index];
- if (rate > ath_rc_priv->rate_max_phy)
- continue;
-
- /*
- * For TCP the average collision rate is around 11%,
- * so we ignore PERs less than this. This is to
- * prevent the rate we are currently using (whose
- * PER might be in the 10-15 range because of TCP
- * collisions) looking worse than the next lower
- * rate whose PER has decayed close to 0. If we
- * used the next lower rate, its PER would grow to
- * 10-15 and we would be worse off than staying
- * at the current rate.
- */
- per_thres = ath_rc_priv->per[rate];
- if (per_thres < 12)
- per_thres = 12;
-
- this_thruput = rate_table->info[rate].user_ratekbps *
- (100 - per_thres);
-
- if (best_thruput <= this_thruput) {
- best_thruput = this_thruput;
- best_rate = rate;
- }
- }
-
- rate = best_rate;
-
- /*
- * Must check the actual rate (ratekbps) to account for
- * non-monotonicity of 11g's rate table
- */
-
- if (rate >= ath_rc_priv->rate_max_phy) {
- rate = ath_rc_priv->rate_max_phy;
-
- /* Probe the next allowed phy state */
- if (ath_rc_get_nextvalid_txrate(rate_table,
- ath_rc_priv, rate, &next_rate) &&
- (now_msec - ath_rc_priv->probe_time >
- rate_table->probe_interval) &&
- (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
- rate = next_rate;
- ath_rc_priv->probe_rate = rate;
- ath_rc_priv->probe_time = now_msec;
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- *is_probing = 1;
- }
- }
-
- if (rate > (ath_rc_priv->rate_table_size - 1))
- rate = ath_rc_priv->rate_table_size - 1;
-
- if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
- return rate;
-
- if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
- return rate;
-
- if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
- return rate;
-
- /* This should not happen */
- WARN_ON_ONCE(1);
-
- rate = ath_rc_priv->valid_rate_index[0];
-
- return rate;
-}
-
-static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate,
- struct ieee80211_tx_rate_control *txrc,
- u8 tries, u8 rix, int rtsctsenable)
-{
- rate->count = tries;
- rate->idx = rate_table->info[rix].ratecode;
-
- if (txrc->rts || rtsctsenable)
- rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-
- if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
- rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
- conf_is_ht40(&txrc->hw->conf))
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
- rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- }
-}
-
-static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ieee80211_tx_info *tx_info)
-{
- struct ieee80211_bss_conf *bss_conf;
-
- if (!tx_info->control.vif)
- return;
- /*
- * For legacy frames, mac80211 takes care of CTS protection.
- */
- if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- return;
-
- bss_conf = &tx_info->control.vif->bss_conf;
-
- if (!bss_conf->basic_rates)
- return;
-
- /*
- * For now, use the lowest allowed basic rate for HT frames.
- */
- tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
-}
-
-static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table;
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix;
- int is_probe = 0;
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- /*
- * For Multi Rate Retry we use a different number of
- * retry attempt counts. This ends up looking like this:
- *
- * MRR[0] = 4
- * MRR[1] = 4
- * MRR[2] = 4
- * MRR[3] = 8
- *
- */
- try_per_rate = 4;
-
- rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
- tx_info->flags |= IEEE80211_TX_CTL_LDPC;
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
- tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
-
- if (is_probe) {
- /*
- * Set one try for probe rates. For the
- * probes don't enable RTS.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, rix, 0);
- /*
- * Get the next tried/allowed rate.
- * No RTS for the next series after the probe rate.
- */
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
-
- tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
- } else {
- /*
- * Set the chosen rate. No RTS for first series entry.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
- }
-
- for ( ; i < 4; i++) {
- /*
- * Use twice the number of tries for the last MRR segment.
- */
- if (i + 1 == 4)
- try_per_rate = 8;
-
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-
- /*
- * All other rates in the series have RTS enabled.
- */
- ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, rix, 1);
- }
-
- /*
- * NB: Change rate series to enable aggregation when operating
- * at lower MCS rates. When first rate in series is MCS2
- * in HT40 @ 2.4GHz, series should look like:
- *
- * {MCS2, MCS1, MCS0, MCS0}.
- *
- * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
- * look like:
- *
- * {MCS3, MCS2, MCS1, MCS1}
- *
- * So, set fourth rate in series to be same as third one for
- * above conditions.
- */
- if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
- (conf_is_ht(&sc->hw->conf))) {
- u8 dot11rate = rate_table->info[rix].dot11rate;
- u8 phy = rate_table->info[rix].phy;
- if (i == 4 &&
- ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
- (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
- rates[3].idx = rates[2].idx;
- rates[3].flags = rates[2].flags;
- }
- }
-
- /*
- * Force hardware to use computed duration for next
- * fragment by disabling multi-rate retry, which
- * updates duration based on the multi-rate duration table.
- *
- * FIXME: Fix duration
- */
- if (ieee80211_has_morefrags(fc) ||
- (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
- rates[1].count = rates[2].count = rates[3].count = 0;
- rates[1].idx = rates[2].idx = rates[3].idx = 0;
- rates[0].count = ATH_TXMAXTRY;
- }
-
- ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
-}
-
-static void ath_rc_update_per(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries,
- u32 now_msec)
-{
- int count, n_bad_frames;
- u8 last_per;
- static const u32 nretry_to_per_lookup[10] = {
- 100 * 0 / 1,
- 100 * 1 / 4,
- 100 * 1 / 2,
- 100 * 3 / 4,
- 100 * 4 / 5,
- 100 * 5 / 6,
- 100 * 6 / 7,
- 100 * 7 / 8,
- 100 * 8 / 9,
- 100 * 9 / 10
- };
-
- last_per = ath_rc_priv->per[tx_rate];
- n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
-
- if (xretries) {
- if (xretries == 1) {
- ath_rc_priv->per[tx_rate] += 30;
- if (ath_rc_priv->per[tx_rate] > 100)
- ath_rc_priv->per[tx_rate] = 100;
- } else {
- /* xretries == 2 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) + (100 >> 3));
- }
-
- /* xretries == 1 or 2 */
-
- if (ath_rc_priv->probe_rate == tx_rate)
- ath_rc_priv->probe_rate = 0;
-
- } else { /* xretries == 0 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- if (n_bad_frames) {
- /* new_PER = 7/8*old_PER + 1/8*(currentPER)
- * Assuming that n_frames is not 0. The current PER
- * from the retries is 100 * retries / (retries+1),
- * since the first retries attempts failed, and the
- * next one worked. For the one that worked,
- * n_bad_frames subframes out of n_frames worked,
- * so the PER for that part is
- * 100 * n_bad_frames / n_frames, and it contributes
- * 100 * n_bad_frames / (n_frames * (retries+1)) to
- * the above PER. The expression below is a
- * simplified version of the sum of these two terms.
- */
- if (tx_info->status.ampdu_len > 0) {
- int n_frames, n_bad_tries;
- u8 cur_per, new_per;
-
- n_bad_tries = retries * tx_info->status.ampdu_len +
- n_bad_frames;
- n_frames = tx_info->status.ampdu_len * (retries + 1);
- cur_per = (100 * n_bad_tries / n_frames) >> 3;
- new_per = (u8)(last_per - (last_per >> 3) + cur_per);
- ath_rc_priv->per[tx_rate] = new_per;
- }
- } else {
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) +
- (nretry_to_per_lookup[retries] >> 3));
- }
-
-
- /*
- * If we got at most one retry then increase the max rate if
- * this was a probe. Otherwise, ignore the probe.
- */
- if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
- if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
- /*
- * Since we probed with just a single attempt,
- * any retries means the probe failed. Also,
- * if the attempt worked, but more than half
- * the subframes were bad then also consider
- * the probe a failure.
- */
- ath_rc_priv->probe_rate = 0;
- } else {
- u8 probe_rate = 0;
-
- ath_rc_priv->rate_max_phy =
- ath_rc_priv->probe_rate;
- probe_rate = ath_rc_priv->probe_rate;
-
- if (ath_rc_priv->per[probe_rate] > 30)
- ath_rc_priv->per[probe_rate] = 20;
-
- ath_rc_priv->probe_rate = 0;
-
- /*
- * Since this probe succeeded, we allow the next
- * probe twice as soon. This allows the maxRate
- * to move up faster if the probes are
- * successful.
- */
- ath_rc_priv->probe_time =
- now_msec - rate_table->probe_interval / 2;
- }
- }
-
- if (retries > 0) {
- /*
- * Don't update anything. We don't know if
- * this was because of collisions or poor signal.
- */
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- } else {
- /*
- * It worked with no retries. First ignore bogus (small)
- * rssi_ack values.
- */
- if (tx_rate == ath_rc_priv->rate_max_phy &&
- ath_rc_priv->hw_maxretry_pktcnt < 255) {
- ath_rc_priv->hw_maxretry_pktcnt++;
- }
-
- }
- }
-}
-
-static void ath_rc_update_ht(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries)
-{
- u32 now_msec = jiffies_to_msecs(jiffies);
- int rate;
- u8 last_per;
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int size = ath_rc_priv->rate_table_size;
-
- if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
- return;
-
- last_per = ath_rc_priv->per[tx_rate];
-
- /* Update PER first */
- ath_rc_update_per(sc, rate_table, ath_rc_priv,
- tx_info, tx_rate, xretries,
- retries, now_msec);
-
- /*
- * If this rate looks bad (high PER) then stop using it for
- * a while (except if we are probing).
- */
- if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
- rate_table->info[tx_rate].ratekbps <=
- rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
- ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
- &ath_rc_priv->rate_max_phy);
-
- /* Don't probe for a little while. */
- ath_rc_priv->probe_time = now_msec;
- }
-
- /* Make sure the rates below this have lower PER */
- /* Monotonicity is kept only for rates below the current rate. */
- if (ath_rc_priv->per[tx_rate] < last_per) {
- for (rate = tx_rate - 1; rate >= 0; rate--) {
-
- if (ath_rc_priv->per[rate] >
- ath_rc_priv->per[rate+1]) {
- ath_rc_priv->per[rate] =
- ath_rc_priv->per[rate+1];
- }
- }
- }
-
- /* Maintain monotonicity for rates above the current rate */
- for (rate = tx_rate; rate < size - 1; rate++) {
- if (ath_rc_priv->per[rate+1] <
- ath_rc_priv->per[rate])
- ath_rc_priv->per[rate+1] =
- ath_rc_priv->per[rate];
- }
-
- /* Every so often, we reduce the thresholds
- * and PER (different for CCK and OFDM). */
- if (now_msec - ath_rc_priv->per_down_time >=
- rate_table->probe_interval) {
- for (rate = 0; rate < size; rate++) {
- ath_rc_priv->per[rate] =
- 7 * ath_rc_priv->per[rate] / 8;
- }
-
- ath_rc_priv->per_down_time = now_msec;
- }
-
- ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
- ath_rc_priv->per[tx_rate]);
-
-}
-
-static void ath_rc_tx_status(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->status.rates;
- struct ieee80211_tx_rate *rate;
- int final_ts_idx = 0, xretries = 0, long_retry = 0;
- u8 flags;
- u32 i = 0, rix;
-
- for (i = 0; i < sc->hw->max_rates; i++) {
- rate = &tx_info->status.rates[i];
- if (rate->idx < 0 || !rate->count)
- break;
-
- final_ts_idx = i;
- long_retry = rate->count - 1;
- }
-
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
- xretries = 1;
-
- /*
- * If the first rate is not the final index, there
- * are intermediate rate failures to be processed.
- */
- if (final_ts_idx != 0) {
- for (i = 0; i < final_ts_idx ; i++) {
- if (rates[i].count != 0 && (rates[i].idx >= 0)) {
- flags = rates[i].flags;
-
- /* If HT40 and we have switched mode from
- * 40 to 20 => don't update */
-
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info,
- rix, xretries ? 1 : 2,
- rates[i].count);
- }
- }
- }
-
- flags = rates[final_ts_idx].flags;
-
- /* If HT40 and we have switched mode from 40 to 20 => don't update */
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
- ath_debug_stat_rc(ath_rc_priv, rix);
-}
-
-static const
-struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
- enum ieee80211_band band,
- bool is_ht)
-{
- switch(band) {
- case IEEE80211_BAND_2GHZ:
- if (is_ht)
- return &ar5416_11ng_ratetable;
- return &ar5416_11g_ratetable;
- case IEEE80211_BAND_5GHZ:
- if (is_ht)
- return &ar5416_11na_ratetable;
- return &ar5416_11a_ratetable;
- default:
- return NULL;
- }
-}
-
-static void ath_rc_init(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 i, j, k, hi = 0, hthi = 0;
-
- ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
-
- for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
- ath_rc_priv->per[i] = 0;
- ath_rc_priv->valid_rate_index[i] = 0;
- }
-
- for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < RATE_TABLE_SIZE; j++)
- ath_rc_priv->valid_phy_rateidx[i][j] = 0;
- ath_rc_priv->valid_phy_ratecnt[i] = 0;
- }
-
- if (!rateset->rs_nrates) {
- hi = ath_rc_init_validrates(ath_rc_priv);
- } else {
- hi = ath_rc_setvalid_rates(ath_rc_priv, true);
-
- if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
- hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
-
- hi = max(hi, hthi);
- }
-
- ath_rc_priv->rate_table_size = hi + 1;
- ath_rc_priv->rate_max_phy = 0;
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-
- for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
- ath_rc_priv->valid_rate_index[k++] =
- ath_rc_priv->valid_phy_rateidx[i][j];
- }
-
- if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
- !ath_rc_priv->valid_phy_ratecnt[i])
- continue;
-
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
- }
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
- WARN_ON(k > RATE_TABLE_SIZE);
-
- ath_rc_priv->max_valid_rate = k;
- ath_rc_sort_validrates(ath_rc_priv);
- ath_rc_priv->rate_max_phy = (k > 4) ?
- ath_rc_priv->valid_rate_index[k-4] :
- ath_rc_priv->valid_rate_index[k-1];
-
- ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
- ath_rc_priv->ht_cap);
-}
-
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
-{
- u8 caps = 0;
-
- if (sta->ht_cap.ht_supported) {
- caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
- caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
- else if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- caps |= WLAN_RC_40_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- caps |= WLAN_RC_SGI_FLAG;
- } else {
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- caps |= WLAN_RC_SGI_FLAG;
- }
- }
-
- return caps;
-}
-
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
- u8 tidno)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- struct ath_atx_tid *txtid;
-
- if (!sta->ht_cap.ht_supported)
- return false;
-
- txtid = ATH_AN_2_TID(an, tidno);
- return !txtid->active;
-}
-
-
-/***********************************/
-/* mac80211 Rate Control callbacks */
-/***********************************/
-
-static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
-
- if (!priv_sta || !ieee80211_is_data(fc))
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
- return;
-
- ath_rc_tx_status(sc, ath_rc_priv, skb);
-
- /* Check if aggregation has to be enabled for this tid */
- if (conf_is_ht(&sc->hw->conf) &&
- !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc) &&
- skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
- u8 *qc, tid;
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
-
- if(ath_tx_aggr_check(sc, sta, tid))
- ieee80211_start_tx_ba_session(sta, tid, 0);
- }
- }
-}
-
-static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta)
-{
- struct ath_softc *sc = priv;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- int i, j = 0;
- u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
-
- ath_rc_priv->neg_rates.rs_rates[j]
- = (sband->bitrates[i].bitrate * 2) / 10;
- j++;
- }
- }
- ath_rc_priv->neg_rates.rs_nrates = j;
-
- if (sta->ht_cap.ht_supported) {
- for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
- ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
- if (j == ATH_RATE_MAX)
- break;
- }
- ath_rc_priv->neg_ht_rates.rs_nrates = j;
- }
-
- ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
- if (!ath_rc_priv->rate_table) {
- ath_err(common, "No rate table chosen\n");
- return;
- }
-
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-}
-
-static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
-
- if (changed & IEEE80211_RC_BW_CHANGED) {
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-
- ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating Bandwidth changed to: %d\n",
- sc->hw->conf.chandef.width);
- }
-}
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
- struct ath_rc_stats *stats = &rc->rcstats[rix];
-
- stats->xretries += xretries;
- stats->retries += retries;
- stats->per = per;
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_rate_priv *rc = file->private_data;
- char *buf;
- unsigned int len = 0, max;
- int rix;
- ssize_t retval;
-
- if (rc->rate_table == NULL)
- return 0;
-
- max = 80 + rc->rate_table_size * 1024 + 1;
- buf = kmalloc(max, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += sprintf(buf, "%6s %6s %6s "
- "%10s %10s %10s %10s\n",
- "HT", "MCS", "Rate",
- "Success", "Retries", "XRetries", "PER");
-
- for (rix = 0; rix < rc->max_valid_rate; rix++) {
- u8 i = rc->valid_rate_index[rix];
- u32 ratekbps = rc->rate_table->info[i].ratekbps;
- struct ath_rc_stats *stats = &rc->rcstats[i];
- char mcs[5];
- char htmode[5];
- int used_mcs = 0, used_htmode = 0;
-
- if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = scnprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
-
- if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT40");
- else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT20");
- else
- used_htmode = scnprintf(htmode, 5, "????");
- }
-
- mcs[used_mcs] = '\0';
- htmode[used_htmode] = '\0';
-
- len += scnprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
- }
-
- if (len > max)
- len = max;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return retval;
-}
-
-static const struct file_operations fops_rcstat = {
- .read = read_file_rcstat,
- .open = simple_open,
- .owner = THIS_MODULE
-};
-
-static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct ath_rate_priv *rc = priv_sta;
- rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
- dir, rc, &fops_rcstat);
-}
-
-static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct ath_rate_priv *rc = priv_sta;
- debugfs_remove(rc->debugfs_rcstats);
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
-
-static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-static void ath_rate_free(void *priv)
-{
- return;
-}
-
-static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- return kzalloc(sizeof(struct ath_rate_priv), gfp);
-}
-
-static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct ath_rate_priv *rate_priv = priv_sta;
- kfree(rate_priv);
-}
-
-static struct rate_control_ops ath_rate_ops = {
- .module = NULL,
- .name = "ath9k_rate_control",
- .tx_status = ath_tx_status,
- .get_rate = ath_get_rate,
- .rate_init = ath_rate_init,
- .rate_update = ath_rate_update,
- .alloc = ath_rate_alloc,
- .free = ath_rate_free,
- .alloc_sta = ath_rate_alloc_sta,
- .free_sta = ath_rate_free_sta,
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- .add_sta_debugfs = ath_rate_add_sta_debugfs,
- .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
-#endif
-};
-
-int ath_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&ath_rate_ops);
-}
-
-void ath_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&ath_rate_ops);
-}
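Note: the core of the removed rate control is the per-rate packet error rate (PER) kept in ath_rc_priv->per[], smoothed as new_PER = 7/8*old_PER + 1/8*current_PER (see ath_rc_update_per() above) and then used by ath_rc_get_highest_rix() to pick the throughput-maximising rate. A minimal standalone sketch of that smoothing step, outside the kernel and with made-up sample values:

#include <stdio.h>
#include <stdint.h>

/* 1/8-weight exponential moving average, in the same integer form as the
 * removed ath_rc_update_per():
 *   new_PER = old_PER - old_PER/8 + current_PER/8, all in whole percent. */
static uint8_t per_update(uint8_t old_per, uint8_t cur_per)
{
	return (uint8_t)(old_per - (old_per >> 3) + (cur_per >> 3));
}

int main(void)
{
	uint8_t per = 0;
	int i;

	/* Feed ten samples of 100% failure and watch the PER climb. */
	for (i = 0; i < 10; i++) {
		per = per_update(per, 100);
		printf("sample %d: PER = %u%%\n", i + 1, per);
	}
	return 0;
}

Ten consecutive all-failure samples only push the PER into the low seventies, which is what keeps a single bad burst from immediately dragging the selected rate down.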
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644
index b9a87383cb43..000000000000
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2004 Sam Leffler, Errno Consulting
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef RC_H
-#define RC_H
-
-#include "hw.h"
-
-struct ath_softc;
-
-#define ATH_RATE_MAX 30
-#define RATE_TABLE_SIZE 72
-
-#define RC_INVALID 0x0000
-#define RC_LEGACY 0x0001
-#define RC_SS 0x0002
-#define RC_DS 0x0004
-#define RC_TS 0x0008
-#define RC_HT_20 0x0010
-#define RC_HT_40 0x0020
-
-#define RC_STREAM_MASK 0xe
-#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
- (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
-#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
-#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
-
-#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
-#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
-#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
-#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
-#define RC_HT_S_20 (RC_HT_20 | RC_SS)
-#define RC_HT_D_20 (RC_HT_20 | RC_DS)
-#define RC_HT_T_20 (RC_HT_20 | RC_TS)
-#define RC_HT_S_40 (RC_HT_40 | RC_SS)
-#define RC_HT_D_40 (RC_HT_40 | RC_DS)
-#define RC_HT_T_40 (RC_HT_40 | RC_TS)
-
-#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
-#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
-#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
-#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
-
-#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
-#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
-
-#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
-#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
-
-#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
-
-enum {
- WLAN_RC_PHY_OFDM,
- WLAN_RC_PHY_CCK,
- WLAN_RC_PHY_HT_20_SS,
- WLAN_RC_PHY_HT_20_DS,
- WLAN_RC_PHY_HT_20_TS,
- WLAN_RC_PHY_HT_40_SS,
- WLAN_RC_PHY_HT_40_DS,
- WLAN_RC_PHY_HT_40_TS,
- WLAN_RC_PHY_HT_20_SS_HGI,
- WLAN_RC_PHY_HT_20_DS_HGI,
- WLAN_RC_PHY_HT_20_TS_HGI,
- WLAN_RC_PHY_HT_40_SS_HGI,
- WLAN_RC_PHY_HT_40_DS_HGI,
- WLAN_RC_PHY_HT_40_TS_HGI,
- WLAN_RC_PHY_MAX
-};
-
-#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
-#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-
-#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
-
-#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
- ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
-
-#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
- (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
-
-/* Return TRUE if flag supports HT20 && client supports HT20 or
- * return TRUE if flag supports HT40 && client supports HT40.
- * This is used because some rates overlap between HT20/HT40.
- */
-#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
- (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
- ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
-
-#define WLAN_RC_DS_FLAG (0x01)
-#define WLAN_RC_TS_FLAG (0x02)
-#define WLAN_RC_40_FLAG (0x04)
-#define WLAN_RC_SGI_FLAG (0x08)
-#define WLAN_RC_HT_FLAG (0x10)
-
-/**
- * struct ath_rate_table - Rate Control table
- * @rate_cnt: total number of rates for the given wireless mode
- * @mcs_start: MCS rate index offset
- * @rate_flags: Rate Control flags
- * @phy: CCK/OFDM/HT20/HT40
- * @ratekbps: rate in Kbits per second
- * @user_ratekbps: user rate in Kbits per second
- * @ratecode: rate that goes into HW descriptors
- * @dot11rate: value that goes into supported
- * rates info element of MLME
- * @ctrl_rate: Index of next lower basic rate, used for duration computation
- * @cw40index: Index of rates having 40MHz channel width
- * @sgi_index: Index of rates having Short Guard Interval
- * @ht_index: high throughput rates having 40MHz channel width and
- * Short Guard Interval
- * @probe_interval: interval for rate control to probe for other rates
- * @initial_ratemax: initial ratemax value
- */
-struct ath_rate_table {
- int rate_cnt;
- int mcs_start;
- struct {
- u16 rate_flags;
- u8 phy;
- u32 ratekbps;
- u32 user_ratekbps;
- u8 ratecode;
- u8 dot11rate;
- } info[RATE_TABLE_SIZE];
- u32 probe_interval;
- u8 initial_ratemax;
-};
-
-struct ath_rateset {
- u8 rs_nrates;
- u8 rs_rates[ATH_RATE_MAX];
-};
-
-struct ath_rc_stats {
- u32 success;
- u32 retries;
- u32 xretries;
- u8 per;
-};
-
-/**
- * struct ath_rate_priv - Rate Control priv data
- * @state: RC state
- * @probe_rate: rate we are probing at
- * @probe_time: msec timestamp for last probe
- * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
- * @max_valid_rate: maximum number of valid rates
- * @per_down_time: msec timestamp for last PER down step
- * @valid_phy_ratecnt: valid rate count
- * @rate_max_phy: phy index for the max rate
- * @per: PER for every valid rate in %
- * @probe_interval: interval for ratectrl to probe for other rates
- * @ht_cap: HT capabilities
- * @neg_rates: Negotiated rates
- * @neg_ht_rates: Negotiated HT rates
- */
-struct ath_rate_priv {
- u8 rate_table_size;
- u8 probe_rate;
- u8 hw_maxretry_pktcnt;
- u8 max_valid_rate;
- u8 valid_rate_index[RATE_TABLE_SIZE];
- u8 ht_cap;
- u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
- u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
- u8 rate_max_phy;
- u8 per[RATE_TABLE_SIZE];
- u32 probe_time;
- u32 per_down_time;
- u32 probe_interval;
- struct ath_rateset neg_rates;
- struct ath_rateset neg_ht_rates;
- const struct ath_rate_table *rate_table;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *debugfs_rcstats;
- struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
-#endif
-};
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per);
-#else
-static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-}
-static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
-}
-#endif
-
-#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
-int ath_rate_control_register(void);
-void ath_rate_control_unregister(void);
-#else
-static inline int ath_rate_control_register(void)
-{
- return 0;
-}
-
-static inline void ath_rate_control_unregister(void)
-{
-}
-#endif
-
-#endif /* RC_H */
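Note: the removed rc.h describes a station's negotiated capabilities with the WLAN_RC_*_FLAG bits and maps them onto the RC_* rate flags through WLAN_RC_CAP_MODE() and WLAN_RC_CAP_STREAM(). A small standalone sketch of that mapping, with the constants copied from the header above:

#include <stdio.h>

/* Capability bits, as in the removed rc.h */
#define WLAN_RC_DS_FLAG  0x01
#define WLAN_RC_TS_FLAG  0x02
#define WLAN_RC_40_FLAG  0x04
#define WLAN_RC_SGI_FLAG 0x08
#define WLAN_RC_HT_FLAG  0x10

/* Rate flags, as in the removed rc.h */
#define RC_LEGACY 0x0001
#define RC_SS     0x0002
#define RC_DS     0x0004
#define RC_TS     0x0008
#define RC_HT_20  0x0010
#define RC_HT_40  0x0020

#define WLAN_RC_CAP_MODE(capflag) (((capflag) & WLAN_RC_HT_FLAG) ? \
	(((capflag) & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY)

#define WLAN_RC_CAP_STREAM(capflag) (((capflag) & WLAN_RC_TS_FLAG) ? RC_TS : \
	(((capflag) & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS))

int main(void)
{
	/* e.g. an HT40, two-stream, short-GI station */
	unsigned int cap = WLAN_RC_HT_FLAG | WLAN_RC_40_FLAG |
			   WLAN_RC_DS_FLAG | WLAN_RC_SGI_FLAG;

	printf("mode flag:   0x%04x\n", WLAN_RC_CAP_MODE(cap));   /* RC_HT_40 */
	printf("stream flag: 0x%04x\n", WLAN_RC_CAP_STREAM(cap)); /* RC_DS */
	return 0;
}

For such a station the mode resolves to RC_HT_40 (0x0020) and the stream class to RC_DS (0x0004); ath_rc_check_ht() in the removed rc.c then accepts a rate-table entry only if its rate_flags carry the matching HT40 bit and the RC_DS stream bit.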
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 82e340d3ec60..6c9accdb52e4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -762,204 +762,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return bf;
}
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
- struct ieee80211_hdr *hdr,
- struct ieee80211_rx_status *rxs,
- struct ath_rx_status *rx_stats,
- bool *decrypt_error)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- bool is_mc, is_valid_tkip, strip_mic, mic_error;
- struct ath_hw *ah = common->ah;
- __le16 fc;
-
- fc = hdr->frame_control;
-
- is_mc = !!is_multicast_ether_addr(hdr->addr1);
- is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap);
- strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
- ieee80211_has_protected(fc) &&
- !(rx_stats->rs_status &
- (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS));
-
- /*
- * Key miss events are only relevant for pairwise keys where the
- * descriptor does contain a valid key index. This has been observed
- * mostly with CCMP encryption.
- */
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
- !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
- rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
-
- mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
- !ieee80211_has_morefrags(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- (rx_stats->rs_status & ATH9K_RXERR_MIC);
-
- /*
- * The rx_stats->rs_status will not be set until the end of the
- * chained descriptors so it can be ignored if rs_more is set. The
- * rs_more will be false at the last element of the chained
- * descriptors.
- */
- if (rx_stats->rs_status != 0) {
- u8 status_mask;
-
- if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
- rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
- mic_error = false;
- }
-
- if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
- (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
- *decrypt_error = true;
- mic_error = false;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS;
-
- if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
- status_mask |= ATH9K_RXERR_CRC;
-
- if (rx_stats->rs_status & ~status_mask)
- return false;
- }
-
- /*
- * For unicast frames the MIC error bit can have false positives,
- * so all MIC error reports need to be validated in software.
- * False negatives are not common, so skip software verification
- * if the hardware considers the MIC valid.
- */
- if (strip_mic)
- rxs->flag |= RX_FLAG_MMIC_STRIPPED;
- else if (is_mc && mic_error)
- rxs->flag |= RX_FLAG_MMIC_ERROR;
-
- return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
- struct ath_softc __maybe_unused *sc = common->priv;
- struct ath_hw *ah = sc->sc_ah;
-
- band = ah->curchan->chan->band;
- sband = hw->wiphy->bands[band];
-
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_5MHZ;
- else if (IS_CHAN_HALF_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_10MHZ;
-
- if (rx_stats->rs_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- rxs->flag |= rx_stats->flag;
- rxs->rate_idx = rx_stats->rs_rate & 0x7f;
- return 0;
- }
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
- rxs->rate_idx = i;
- return 0;
- }
- if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
- rxs->rate_idx = i;
- return 0;
- }
- }
-
- /*
- * No valid hardware bitrate found -- we should not get here
- * because hardware has already validated this frame as OK.
- */
- ath_dbg(common, ANY,
- "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
- rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
- return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = common->ah;
- int last_rssi;
- int rssi = rx_stats->rs_rssi;
- int i, j;
-
- /*
- * RSSI is not available for subframes in an A-MPDU.
- */
- if (rx_stats->rs_moreaggr) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- /*
- * Check if the RSSI for the last subframe in an A-MPDU
- * or an unaggregated frame is valid.
- */
- if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
- s8 rssi;
-
- if (!(ah->rxchainmask & BIT(i)))
- continue;
-
- rssi = rx_stats->rs_rssi_ctl[i];
- if (rssi != ATH9K_RSSI_BAD) {
- rxs->chains |= BIT(j);
- rxs->chain_signal[j] = ah->noise + rssi;
- }
- j++;
- }
-
- /*
- * Update Beacon RSSI, this is used by ANI.
- */
- if (rx_stats->is_mybeacon &&
- ((ah->opmode == NL80211_IFTYPE_STATION) ||
- (ah->opmode == NL80211_IFTYPE_ADHOC))) {
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = sc->last_rssi;
-
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- ah->stats.avgbrssi = rssi;
- }
-
- rxs->signal = ah->noise + rx_stats->rs_rssi;
-}
-
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
@@ -1055,7 +857,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
@@ -1069,10 +871,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (WARN_ON(!ah->curchan))
return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
+ ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+ rx_stats->rs_rate);
+ RX_STAT_INC(rx_rate_err);
return -EINVAL;
+ }
- ath9k_process_rssi(common, hw, rx_stats, rx_status);
+ ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +902,6 @@ corrupt:
return -EINVAL;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
-
- keyix = rx_stats->rs_keyix;
-
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
-
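/*
 * Editor's sketch: the padding rule described in the comment of the
 * removed helper (now handled by ath9k_cmn_rx_skb_postprocess()) -- the
 * hardware pads the 802.11 header to a 32-bit boundary, and because
 * header lengths are always even the pad is either 0 or 2 bytes, so
 * padsize = padpos & 3 is enough. Standalone check:
 */
#include <stdio.h>

int main(void)
{
	int hdrlen[] = { 24, 26, 30, 32 };	/* e.g. data, QoS data, 4-addr, 4-addr QoS */

	for (int i = 0; i < 4; i++)
		printf("hdrlen %2d -> pad %d bytes\n", hdrlen[i], hdrlen[i] & 3);
	/* prints 0, 2, 2, 0 -- only 0 or 2, as the comment claims */
	return 0;
}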
/*
* Run the LNA combining algorithm only in these cases:
*
@@ -1292,8 +1051,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_pull(skb, ah->caps.rx_status_len);
if (!rs.rs_more)
- ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
- rxs, decrypt_error);
+ ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index b686a7498450..a65cfb91adca 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -108,7 +108,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
struct ath_tx_control txctl;
int r;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_err(common,
"driver is in invalid state unable to use TX99");
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 1b3230fa3651..2879887f5691 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -198,7 +198,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath_cancel_work(sc);
ath_stop_ani(sc);
- if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
ret = -EINVAL;
goto fail_wow;
@@ -224,7 +224,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
* STA.
*/
- if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
ath_dbg(common, WOW, "None of the STA vifs are associated\n");
ret = 1;
goto fail_wow;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 55897d508a76..87cbec47fb48 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1040,11 +1040,11 @@ static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
int symbols, bits;
int bytes = 0;
+ usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
bits -= OFDM_PLCP_BITS;
bytes = bits / 8;
- bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
if (bytes > 65532)
bytes = 65532;
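/*
 * Editor's sketch: the hunk above moves the preamble/training overhead
 * subtraction from the byte count onto the microsecond budget, which is
 * the unit the L_STF/.../HT_LTF constants are expressed in. Treating
 * ~36 us of preamble as 36 bytes overestimates the payload, since 36 us
 * is worth several OFDM symbols. The values below are assumed for
 * illustration, not the driver's tables.
 */
#include <stdio.h>

#define EX_PREAMBLE_US		36	/* assumed legacy + HT preamble, 1 stream */
#define EX_SYMBOL_US		4	/* one OFDM symbol, long GI */
#define EX_BITS_PER_SYMBOL	260	/* assumed, roughly MCS7 at 20 MHz */

static int ex_max_framelen(int usec)
{
	usec -= EX_PREAMBLE_US;			/* remove fixed preamble time first */
	int symbols = usec / EX_SYMBOL_US;
	return symbols * EX_BITS_PER_SYMBOL / 8;
}

int main(void)
{
	printf("%d payload bytes fit in 4000 us\n", ex_max_framelen(4000));
	return 0;
}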
@@ -1076,6 +1076,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len, bool rts)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
@@ -1145,7 +1146,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
}
/* legacy rates */
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
@@ -1698,7 +1699,7 @@ int ath_cabq_update(struct ath_softc *sc)
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
- qi.tqi_readyTime = (cur_conf->beacon_interval *
+ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
ATH_CABQ_READY_TIME) / 100;
ath_txq_update(sc, qnum, &qi);
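/*
 * Editor's sketch: tqi_readyTime is programmed in microseconds, while
 * cur_conf->beacon_interval is in TU (1 TU = 1024 us), hence the added
 * TU_TO_USEC() conversion. The 80% ready-time share below is only an
 * assumed example value.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TU_TO_USEC(tu)	((uint32_t)(tu) << 10)	/* 1 TU = 1024 us */

int main(void)
{
	uint32_t beacon_interval_tu = 100;	/* typical beacon interval */
	uint32_t ready_percent = 80;		/* assumed ready-time percentage */
	uint32_t ready_us = EX_TU_TO_USEC(beacon_interval_tu) * ready_percent / 100;

	printf("CAB queue ready time: %u us\n", ready_us);	/* 81920 us */
	return 0;
}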
@@ -1768,7 +1769,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
int i;
u32 npend = 0;
- if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+ if (test_bit(ATH_OP_INVALID, &common->op_flags))
return true;
ath9k_hw_abort_tx_dma(ah);
@@ -1816,11 +1817,12 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
bool sent = false;
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
list_empty(&txq->axq_acq))
return;
@@ -2470,7 +2472,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_lock(sc, txq);
for (;;) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
if (list_empty(&txq->axq_q)) {
@@ -2553,7 +2555,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
- if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
@@ -2569,7 +2571,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
- ath9k_csa_is_finished(sc);
+ ath9k_csa_update(sc);
continue;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 536bc46a2912..924135b8e575 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -572,7 +572,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
{
- struct ieee80211_bar *bar = (void *) data;
+ struct ieee80211_bar *bar = data;
struct carl9170_bar_list_entry *entry;
unsigned int queue;
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index e5e905910db4..415393dfb6fc 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -222,7 +222,7 @@ static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
static const struct
ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
{
- switch (reg->regpair->regDmnEnum) {
+ switch (reg->regpair->reg_domain) {
case 0x60:
case 0x61:
case 0x62:
@@ -431,7 +431,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ath_regulatory *reg)
{
- switch (reg->regpair->regDmnEnum) {
+ switch (reg->regpair->reg_domain) {
case 0x60:
case 0x63:
case 0x66:
@@ -560,7 +560,7 @@ static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
printk(KERN_DEBUG "ath: EEPROM indicates we "
"should expect a direct regpair map\n");
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
- if (regDomainPairs[i].regDmnEnum == rd)
+ if (regDomainPairs[i].reg_domain == rd)
return true;
}
printk(KERN_DEBUG
@@ -617,7 +617,7 @@ ath_get_regpair(int regdmn)
if (regdmn == NO_ENUMRD)
return NULL;
for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
- if (regDomainPairs[i].regDmnEnum == regdmn)
+ if (regDomainPairs[i].reg_domain == regdmn)
return &regDomainPairs[i];
}
return NULL;
@@ -741,7 +741,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
reg->alpha2[0], reg->alpha2[1]);
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
- reg->regpair->regDmnEnum);
+ reg->regpair->reg_domain);
return 0;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index ee25786b4447..73f12f196f14 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
writel(data, wcn->mmio + addr);
}
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
+do { \
+ if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+ else \
+ wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0) \
+
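/*
 * Editor's note: the macro above relies on token pasting, so callers keep
 * passing the bare register name and the preprocessor appends the chip
 * suffix (_3660 or _3680). Minimal standalone illustration of the same
 * trick; names with an EX_ prefix are placeholders:
 */
#include <stdio.h>

#define EX_REG_CCU_INT_3660	0x200b10
#define EX_REG_CCU_INT_3680	0x2050dc
#define EX_CHIP_3680		1

#define ex_write_register_x(chip, reg, data) \
do { \
	if ((chip) == EX_CHIP_3680) \
		printf("write 0x%x to reg 0x%x\n", (data), reg ## _3680); \
	else \
		printf("write 0x%x to reg 0x%x\n", (data), reg ## _3660); \
} while (0)

int main(void)
{
	ex_write_register_x(EX_CHIP_3680, EX_REG_CCU_INT, 0xA0011);
	return 0;
}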
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/* Setting interrupt path */
reg_data = WCN36XX_DXE_CCU_INT;
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+ wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index c88562f85de1..35ee7e966bd2 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
-#define WCN36XX_DXE_MEM_BASE 0x03000000
#define WCN36XX_DXE_MEM_REG 0x202000
#define WCN36XX_DXE_CCU_INT 0xA0011
-#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
/* TODO This must be calculated properly, not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 3c2ef0c32f72..a1f1127d7808 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
MAX_FEATURE_SUPPORTED = 128,
};
+#define WCN36XX_HAL_CAPS_SIZE 4
+
struct wcn36xx_hal_feat_caps_msg {
struct wcn36xx_hal_msg_header header;
- u32 feat_caps[4];
+ u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
} __packed;
/* status codes to help debug rekey failures */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e64a6784079e..4ab5370ab7a6 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "wcn36xx.h"
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
+static const char * const wcn36xx_caps_names[] = {
+ "MCC", /* 0 */
+ "P2P", /* 1 */
+ "DOT11AC", /* 2 */
+ "SLM_SESSIONIZATION", /* 3 */
+ "DOT11AC_OPMODE", /* 4 */
+ "SAP32STA", /* 5 */
+ "TDLS", /* 6 */
+ "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+ "WLANACTIVE_OFFLOAD", /* 8 */
+ "BEACON_OFFLOAD", /* 9 */
+ "SCAN_OFFLOAD", /* 10 */
+ "ROAM_OFFLOAD", /* 11 */
+ "BCN_MISS_OFFLOAD", /* 12 */
+ "STA_POWERSAVE", /* 13 */
+ "STA_ADVANCED_PWRSAVE", /* 14 */
+ "AP_UAPSD", /* 15 */
+ "AP_DFS", /* 16 */
+ "BLOCKACK", /* 17 */
+ "PHY_ERR", /* 18 */
+ "BCN_FILTER", /* 19 */
+ "RTT", /* 20 */
+ "RATECTRL", /* 21 */
+ "WOW" /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+ int i;
+
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (get_feat_caps(wcn->fw_feat_caps, i))
+ wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+ }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+ if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+ wcn36xx_info("Chip is 3680\n");
+ wcn->chip_version = WCN36XX_CHIP_3680;
+ } else {
+ wcn36xx_info("Chip is 3660\n");
+ wcn->chip_version = WCN36XX_CHIP_3660;
+ }
+}
+
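/*
 * Editor's sketch: the firmware capability bitmap is an array of
 * WCN36XX_HAL_CAPS_SIZE (4) 32-bit words, and a capability index such as
 * DOT11AC (2) is split into a word index and a bit index. Standalone
 * illustration; the ex_ helper below only mirrors the layout, it is not
 * the driver's get_feat_caps():
 */
#include <stdio.h>
#include <stdint.h>

#define EX_CAPS_WORDS	4

static int ex_get_feat_cap(const uint32_t *bitmap, unsigned int cap)
{
	unsigned int arr_idx = cap / 32;	/* which u32 word */
	unsigned int bit_idx = cap % 32;	/* which bit inside it */

	if (arr_idx >= EX_CAPS_WORDS)
		return 0;
	return !!(bitmap[arr_idx] & (1u << bit_idx));
}

int main(void)
{
	uint32_t caps[EX_CAPS_WORDS] = { 0 };

	caps[0] |= 1u << 2;	/* pretend the firmware reported DOT11AC */
	printf("DOT11AC -> chip 3680: %d\n", ex_get_feat_cap(caps, 2));
	return 0;
}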
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_smd_buf;
}
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ else
+ wcn36xx_feat_caps_info(wcn);
+ }
+
+ wcn36xx_detect_chip_version(wcn);
+
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_feature_caps_exchange(wcn);
- if (ret)
- wcn36xx_warn("Exchange feature caps failed\n");
- }
INIT_LIST_HEAD(&wcn->vif_list);
return 0;
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
+ vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = 0xff;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+ release_firmware(wcn->nv);
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 750626b0e22d..7bf0ef8a1f56 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
+ unsigned long start;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
+ start = jiffies;
ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
- wcn36xx_err("Timeout while waiting SMD response\n");
+ wcn36xx_err("Timeout! No SMD response in %dms\n",
+ HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
+ wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+ jiffies_to_msecs(jiffies - start));
out:
return ret;
}
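/*
 * Editor's note: the hunk above samples jiffies before sending the SMD
 * command and reports the elapsed time with jiffies_to_msecs() after the
 * completion fires. A user-space analogue of that bookkeeping, with
 * clock_gettime() standing in for jiffies:
 */
#include <stdio.h>
#include <time.h>

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
	return (b->tv_sec - a->tv_sec) * 1000 +
	       (b->tv_nsec - a->tv_nsec) / 1000000;
}

int main(void)
{
	struct timespec start, end;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... send the command and wait for the response here ... */
	clock_gettime(CLOCK_MONOTONIC, &end);
	printf("SMD command completed in %ldms\n", elapsed_ms(&start, &end));
	return 0;
}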
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
- const struct firmware *nv;
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
- ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
- if (ret) {
- wcn36xx_err("Failed to load nv file %s: %d\n",
- WLAN_NV_FILE, ret);
- goto out_free_nv;
+ if (!wcn->nv) {
+ ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out;
+ }
}
- nv_d = (struct nv_data *)nv->data;
+ nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
do {
- fw_bytes_left = nv->size - fm_offset - 4;
+ fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
out_unlock:
mutex_unlock(&wcn->hal_mutex);
-out_free_nv:
- release_firmware(nv);
-
- return ret;
+out: return ret;
}
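/*
 * Editor's note: with this change the NV blob fetched by request_firmware()
 * is cached in wcn->nv and reused by later wcn36xx_smd_load_nv() calls;
 * the matching release_firmware(wcn->nv) now happens once, in
 * wcn36xx_remove() (see the main.c hunk earlier in this patch).
 */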
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
+ sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
- params->p2p);
+ params->uc_ucast_sig, params->p2p);
return 0;
}
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+ priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
- wcn36xx_err("Sending hal_exit_bmps failed\n");
+ wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
- wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
@@ -1682,8 +1686,7 @@ out:
return ret;
}
-static inline void set_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
bitmap[arr_idx] |= (1 << bit_idx);
}
-static inline int get_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
return ret;
}
-static inline void clear_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
- struct wcn36xx_hal_feat_caps_msg msg_body;
- int ret = 0;
+ struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+ int ret = 0, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
- if (ret) {
- wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
- ret);
+ if (wcn->hal_rsp_len != sizeof(*rsp)) {
+ wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
+
+ rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+ for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+ wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index e7c39019c6f1..008d03423dbf 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 200
+#define HAL_MSG_TIMEOUT 500
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info is contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b2b60e30caaf..32bb26a0db2a 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
- status.flag, status.vendor_radiotap_len);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct ieee80211_vif,
drv_priv);
+ bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- bd->dpu_sign = __vif_priv->ucast_dpu_signature;
-
if (ieee80211_is_nullfunc(hdr->frame_control) ||
(sta_priv && !sta_priv->is_data_encrypted))
bd->dpu_ne = 1;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 8fa5cbace5ab..f0fb81dfd17b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
enum wcn36xx_power_state pw_state;
u8 bss_index;
- u8 ucast_dpu_signature;
/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
u8 self_sta_index;
u8 self_dpu_desc_index;
+ u8 self_ucast_dpu_sign;
};
/**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
u16 tid;
u8 sta_index;
u8 dpu_desc_index;
+ u8 ucast_dpu_sign;
u8 bss_sta_index;
u8 bss_dpu_desc_index;
bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;
+ const struct firmware *nv;
+
u8 fw_revision;
u8 fw_version;
u8 fw_minor;
u8 fw_major;
+ u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+ u32 chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
};
+#define WCN36XX_CHIP_3660 0
+#define WCN36XX_CHIP_3680 1
+
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 990dd42ae79e..c7a3465fd02a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += debug.o
+wil6210-y += rx_reorder.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5b340769d5bb..4806a49cb61b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -104,41 +104,125 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type)
return -EOPNOTSUPP;
}
-static int wil_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+ struct station_info *sinfo)
{
- struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- int rc;
struct wmi_notify_req_cmd cmd = {
- .cid = 0,
+ .cid = cid,
.interval_usec = 0,
};
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_notify_req_done_event evt;
+ } __packed reply;
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ int rc;
- if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
- return -ENOENT;
-
- /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
- WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
if (rc)
return rc;
+ wil_dbg_wmi(wil, "Link status for CID %d: {\n"
+ " MCS %d TSF 0x%016llx\n"
+ " BF status 0x%08x SNR 0x%08x SQI %d%%\n"
+ " Tx Tpt %d goodput %d Rx goodput %d\n"
+ " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+ cid, le16_to_cpu(reply.evt.bf_mcs),
+ le64_to_cpu(reply.evt.tsf), reply.evt.status,
+ le32_to_cpu(reply.evt.snr_val),
+ reply.evt.sqi,
+ le32_to_cpu(reply.evt.tx_tpt),
+ le32_to_cpu(reply.evt.tx_goodput),
+ le32_to_cpu(reply.evt.rx_goodput),
+ le16_to_cpu(reply.evt.my_rx_sector),
+ le16_to_cpu(reply.evt.my_tx_sector),
+ le16_to_cpu(reply.evt.other_rx_sector),
+ le16_to_cpu(reply.evt.other_tx_sector));
+
sinfo->generation = wil->sinfo_gen;
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled = STATION_INFO_RX_BYTES |
+ STATION_INFO_TX_BYTES |
+ STATION_INFO_RX_PACKETS |
+ STATION_INFO_TX_PACKETS |
+ STATION_INFO_RX_BITRATE |
+ STATION_INFO_TX_BITRATE |
+ STATION_INFO_RX_DROP_MISC |
+ STATION_INFO_TX_FAILED;
+
sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
- sinfo->txrate.mcs = wil->stats.bf_mcs;
- sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
- sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+ sinfo->rxrate.mcs = stats->last_mcs_rx;
+ sinfo->rx_bytes = stats->rx_bytes;
+ sinfo->rx_packets = stats->rx_packets;
+ sinfo->rx_dropped_misc = stats->rx_dropped;
+ sinfo->tx_bytes = stats->tx_bytes;
+ sinfo->tx_packets = stats->tx_packets;
+ sinfo->tx_failed = stats->tx_errors;
if (test_bit(wil_status_fwconnected, &wil->status)) {
sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = 12; /* TODO: provide real value */
+ sinfo->signal = reply.evt.sqi;
}
- return 0;
+ return rc;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ int cid = wil_find_cid(wil, mac);
+
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ if (cid < 0)
+ return cid;
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
+}
+
+/*
+ * Find @idx-th active STA for station dump.
+ */
+static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_unused)
+ continue;
+ if (idx == 0)
+ return i;
+ idx--;
+ }
+
+ return -ENOENT;
+}
+
+static int wil_cfg80211_dump_station(struct wiphy *wiphy,
+ struct net_device *dev, int idx,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ int cid = wil_find_cid_by_idx(wil, idx);
+
+ if (cid < 0)
+ return -ENOENT;
+
+ memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+ wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+
+ rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+ return rc;
}
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
@@ -181,6 +265,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
u16 chnl[4];
} __packed cmd;
uint i, n;
+ int rc;
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
@@ -198,7 +283,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
/* FW doesn't support scanning after a connection attempt */
if (test_bit(wil_status_dontscan, &wil->status)) {
- wil_err(wil, "Scan after connect attempt not supported\n");
+ wil_err(wil, "Can't scan now\n");
return -EBUSY;
}
@@ -221,8 +306,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
request->channels[i]->center_freq);
}
- return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+
+ if (rc)
+ wil->scan_request = NULL;
+
+ return rc;
}
static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -237,6 +327,10 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int ch;
int rc = 0;
+ if (test_bit(wil_status_fwconnecting, &wil->status) ||
+ test_bit(wil_status_fwconnected, &wil->status))
+ return -EALREADY;
+
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -318,10 +412,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
memcpy(conn.bssid, bss->bssid, ETH_ALEN);
memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
- /*
- * FW don't support scan after connection attempt
- */
- set_bit(wil_status_dontscan, &wil->status);
+
set_bit(wil_status_fwconnecting, &wil->status);
rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
@@ -330,7 +421,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
mod_timer(&wil->connect_timer,
jiffies + msecs_to_jiffies(2000));
} else {
- clear_bit(wil_status_dontscan, &wil->status);
clear_bit(wil_status_fwconnecting, &wil->status);
}
@@ -352,6 +442,40 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return rc;
}
+static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
+{
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+ struct wmi_sw_tx_req_cmd *cmd;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_sw_tx_complete_event evt;
+ } __packed evt;
+
+ cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+ cmd->len = cpu_to_le16(len);
+ memcpy(cmd->payload, buf, len);
+
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc == 0)
+ rc = evt.evt.status;
+
+ kfree(cmd);
+
+ return rc;
+}
+
static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
@@ -402,6 +526,41 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
return 0;
}
+static int wil_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ /* TODO: handle duration */
+ wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+
+ rc = wmi_set_channel(wil, chan->hw_value);
+ if (rc)
+ return rc;
+
+ rc = wmi_rxon(wil, true);
+
+ return rc;
+}
+
+static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+
+ wil_info(wil, "%s()\n", __func__);
+
+ rc = wmi_rxon(wil, false);
+
+ return rc;
+}
+
static int wil_fix_bcon(struct wil6210_priv *wil,
struct cfg80211_beacon_data *bcon)
{
@@ -450,18 +609,20 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (wil_fix_bcon(wil, bcon))
wil_dbg_misc(wil, "Fixed bcon\n");
+ mutex_lock(&wil->mutex);
+
rc = wil_reset(wil);
if (rc)
- return rc;
+ goto out;
/* Rx VRING. */
rc = wil_rx_init(wil);
if (rc)
- return rc;
+ goto out;
rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
if (rc)
- return rc;
+ goto out;
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
@@ -485,11 +646,13 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
channel->hw_value);
if (rc)
- return rc;
+ goto out;
netif_carrier_on(ndev);
+out:
+ mutex_unlock(&wil->mutex);
return rc;
}
@@ -499,17 +662,36 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
int rc = 0;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ mutex_lock(&wil->mutex);
+
rc = wmi_pcp_stop(wil);
+ mutex_unlock(&wil->mutex);
return rc;
}
+static int wil_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ mutex_lock(&wil->mutex);
+ wil6210_disconnect(wil, mac);
+ mutex_unlock(&wil->mutex);
+
+ return 0;
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
.change_virtual_intf = wil_cfg80211_change_iface,
.get_station = wil_cfg80211_get_station,
+ .dump_station = wil_cfg80211_dump_station,
+ .remain_on_channel = wil_remain_on_channel,
+ .cancel_remain_on_channel = wil_cancel_remain_on_channel,
+ .mgmt_tx = wil_cfg80211_mgmt_tx,
.set_monitor_channel = wil_cfg80211_set_channel,
.add_key = wil_cfg80211_add_key,
.del_key = wil_cfg80211_del_key,
@@ -517,6 +699,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
/* AP mode */
.start_ap = wil_cfg80211_start_ap,
.stop_ap = wil_cfg80211_stop_ap,
+ .del_station = wil_cfg80211_del_station,
};
static void wil_wiphy_init(struct wiphy *wiphy)
@@ -542,7 +725,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
/* TODO: figure this out */
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
wiphy->cipher_suites = wil_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 1caa31992a7e..ecdabe4adec3 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -26,9 +26,11 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
+static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
- const char *name, struct vring *vring)
+ const char *name, struct vring *vring,
+ char _s, char _h)
{
void __iomem *x = wmi_addr(wil, vring->hwtail);
@@ -50,8 +52,8 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
volatile struct vring_tx_desc *d = &vring->va[i].tx;
if ((i % 64) == 0 && (i != 0))
seq_printf(s, "\n");
- seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
- "S" : (vring->ctx[i].skb ? "H" : "h"));
+ seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+ _s : (vring->ctx[i].skb ? _h : 'h'));
}
seq_printf(s, "\n");
}
@@ -63,14 +65,19 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
uint i;
struct wil6210_priv *wil = s->private;
- wil_print_vring(s, wil, "rx", &wil->vring_rx);
+ wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
struct vring *vring = &(wil->vring_tx[i]);
if (vring->va) {
+ int cid = wil->vring2cid_tid[i][0];
+ int tid = wil->vring2cid_tid[i][1];
char name[10];
snprintf(name, sizeof(name), "tx_%2d", i);
- wil_print_vring(s, wil, name, vring);
+
+ seq_printf(s, "\n%pM CID %d TID %d\n",
+ wil->sta[cid].addr, cid, tid);
+ wil_print_vring(s, wil, name, vring, '_', 'H');
}
}
@@ -390,25 +397,78 @@ static const struct file_operations fops_reset = {
.write = wil_write_file_reset,
.open = simple_open,
};
-/*---------Tx descriptor------------*/
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+ const char *prefix)
+{
+ char printbuf[16 * 3 + 2];
+ int i = 0;
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, "%s%s\n", prefix, printbuf);
+ i += l;
+ }
+}
+
+static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
+{
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ seq_printf(s, " len = %d\n", len);
+ wil_seq_hexdump(s, p, len, " : ");
+
+ if (nr_frags) {
+ seq_printf(s, " nr_frags = %d\n", nr_frags);
+ for (i = 0; i < nr_frags; i++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+
+ len = skb_frag_size(frag);
+ p = skb_frag_address_safe(frag);
+ seq_printf(s, " [%2d] : len = %d\n", i, len);
+ wil_seq_hexdump(s, p, len, " : ");
+ }
+ }
+}
+
+/*---------Tx/Rx descriptor------------*/
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- struct vring *vring = &(wil->vring_tx[0]);
+ struct vring *vring;
+ bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+ if (tx)
+ vring = &(wil->vring_tx[dbg_vring_index]);
+ else
+ vring = &wil->vring_rx;
if (!vring->va) {
- seq_printf(s, "No Tx VRING\n");
+ if (tx)
+ seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+ else
+ seq_puts(s, "No Rx VRING\n");
return 0;
}
if (dbg_txdesc_index < vring->size) {
+ /* use struct vring_tx_desc for Rx as well,
+ * the only field used, .dma.length, is the same
+ */
volatile struct vring_tx_desc *d =
&(vring->va[dbg_txdesc_index].tx);
volatile u32 *u = (volatile u32 *)d;
struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
- seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ if (tx)
+ seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
+ dbg_txdesc_index);
+ else
+ seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -416,31 +476,19 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
- char printbuf[16 * 3 + 2];
- int i = 0;
- int len = le16_to_cpu(d->dma.length);
- void *p = skb->data;
-
- if (len != skb_headlen(skb)) {
- seq_printf(s, "!!! len: desc = %d skb = %d\n",
- len, skb_headlen(skb));
- len = min_t(int, len, skb_headlen(skb));
- }
-
- seq_printf(s, " len = %d\n", len);
-
- while (i < len) {
- int l = min(len - i, 16);
- hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
- sizeof(printbuf), false);
- seq_printf(s, " : %s\n", printbuf);
- i += l;
- }
+ skb_get(skb);
+ wil_seq_print_skb(s, skb);
+ kfree_skb(skb);
}
seq_printf(s, "}\n");
} else {
- seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
- dbg_txdesc_index, vring->size);
+ if (tx)
+ seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+ dbg_vring_index, dbg_txdesc_index,
+ vring->size);
+ else
+ seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
}
return 0;
@@ -570,6 +618,69 @@ static const struct file_operations fops_temp = {
.llseek = seq_lseek,
};
+/*---------Station matrix------------*/
+static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
+{
+ int i;
+ u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+ seq_printf(s, "0x%03x [", r->head_seq_num);
+ for (i = 0; i < r->buf_size; i++) {
+ if (i == index)
+ seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
+ else
+ seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
+ }
+ seq_puts(s, "]\n");
+}
+
+static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ int i, tid;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ struct wil_sta_info *p = &wil->sta[i];
+ char *status = "unknown";
+ switch (p->status) {
+ case wil_sta_unused:
+ status = "unused ";
+ break;
+ case wil_sta_conn_pending:
+ status = "pending ";
+ break;
+ case wil_sta_connected:
+ status = "connected";
+ break;
+ }
+ seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+ (p->data_port_open ? " data_port_open" : ""));
+
+ if (p->status == wil_sta_connected) {
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+ if (r) {
+ seq_printf(s, "[%2d] ", tid);
+ wil_print_rxtid(s, r);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int wil_sta_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_sta_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_sta = {
+ .open = wil_sta_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
int wil6210_debugfs_init(struct wil6210_priv *wil)
{
@@ -581,9 +692,13 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
- debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
- debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta);
+ debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg,
&dbg_txdesc_index);
+ debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_vring_index);
+
debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 10919f95a83c..5824cd41e4ba 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -195,8 +195,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
wil_dbg_irq(wil, "RX done\n");
isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
- wil_dbg_txrx(wil, "NAPI schedule\n");
- napi_schedule(&wil->napi_rx);
+ if (test_bit(wil_status_reset_done, &wil->status)) {
+ wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+ napi_schedule(&wil->napi_rx);
+ } else {
+ wil_err(wil, "Got Rx interrupt while in reset\n");
+ }
}
if (isr)
@@ -226,10 +230,15 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
wil_dbg_irq(wil, "TX done\n");
- napi_schedule(&wil->napi_tx);
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
+ if (test_bit(wil_status_reset_done, &wil->status)) {
+ wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+ napi_schedule(&wil->napi_tx);
+ } else {
+ wil_err(wil, "Got Tx interrupt while in reset\n");
+ }
}
if (isr)
@@ -319,6 +328,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
if (isr & ISR_MISC_FW_ERROR) {
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
+ wil_fw_error_recovery(wil);
}
if (isr & ISR_MISC_MBOX_EVT) {
@@ -493,6 +503,23 @@ free0:
return rc;
}
+/* can't use wil_ioread32_and_clear because the ICC value is not set yet */
+static inline void wil_clear32(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ iowrite32(x, addr);
+}
+
+void wil6210_clear_irq(struct wil6210_priv *wil)
+{
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+}
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index fd30cddd5882..95f4efe9ef37 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -16,8 +16,14 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
#include "wil6210.h"
+#include "txrx.h"
+
+static bool no_fw_recovery;
+module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery");
/*
* Due to a hardware issue,
@@ -52,29 +58,74 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
{
uint i;
- struct net_device *ndev = wil_to_ndev(wil);
+ struct wil_sta_info *sta = &wil->sta[cid];
- wil_dbg_misc(wil, "%s()\n", __func__);
+ sta->data_port_open = false;
+ if (sta->status != wil_sta_unused) {
+ wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+ sta->status = wil_sta_unused;
+ }
- wil_link_off(wil);
- if (test_bit(wil_status_fwconnected, &wil->status)) {
- clear_bit(wil_status_fwconnected, &wil->status);
- cfg80211_disconnected(ndev,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- NULL, 0, GFP_KERNEL);
- } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
- cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+ }
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (wil->vring2cid_tid[i][0] == cid)
+ wil_vring_fini_tx(wil, i);
}
- clear_bit(wil_status_fwconnecting, &wil->status);
- for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
- wil_vring_fini_tx(wil, i);
+ memset(&sta->stats, 0, sizeof(sta->stats));
+}
- clear_bit(wil_status_dontscan, &wil->status);
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ int cid = -ENOENT;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ might_sleep();
+ if (bssid) {
+ cid = wil_find_cid(wil, bssid);
+ wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid);
+ } else {
+ wil_dbg_misc(wil, "%s(all)\n", __func__);
+ }
+
+ if (cid >= 0) /* disconnect 1 peer */
+ wil_disconnect_cid(wil, cid);
+ else /* disconnect all */
+ for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ wil_disconnect_cid(wil, cid);
+
+ /* link state */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_link_off(wil);
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ clear_bit(wil_status_fwconnected, &wil->status);
+ cfg80211_disconnected(ndev,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+ clear_bit(wil_status_fwconnecting, &wil->status);
+ break;
+ default:
+ /* AP-like interface and monitor:
+ * never scan, always connected
+ */
+ if (bssid)
+ cfg80211_del_sta(ndev, bssid, GFP_KERNEL);
+ break;
+ }
}
static void wil_disconnect_worker(struct work_struct *work)
@@ -82,7 +133,9 @@ static void wil_disconnect_worker(struct work_struct *work)
struct wil6210_priv *wil = container_of(work,
struct wil6210_priv, disconnect_worker);
+ mutex_lock(&wil->mutex);
_wil6210_disconnect(wil, NULL);
+ mutex_unlock(&wil->mutex);
}
static void wil_connect_timer_fn(ulong x)
@@ -97,12 +150,55 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
+static void wil_fw_error_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, fw_error_worker);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_misc(wil, "fw error worker\n");
+
+ if (no_fw_recovery)
+ return;
+
+ mutex_lock(&wil->mutex);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_MONITOR:
+ wil_info(wil, "fw error recovery started...\n");
+ wil_reset(wil);
+
+ /* need to re-allocate Rx ring after reset */
+ wil_rx_init(wil);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* recovery in these modes is done by upper layers */
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&wil->mutex);
+}
+
+static int wil_find_free_vring(struct wil6210_priv *wil)
+{
+ int i;
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ if (!wil->vring_tx[i].va)
+ return i;
+ }
+ return -EINVAL;
+}
+
static void wil_connect_worker(struct work_struct *work)
{
int rc;
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
connect_worker);
int cid = wil->pending_connect_cid;
+ int ringid = wil_find_free_vring(wil);
if (cid < 0) {
wil_err(wil, "No connection pending\n");
@@ -111,16 +207,22 @@ static void wil_connect_worker(struct work_struct *work)
wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
- rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
+ rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
wil->pending_connect_cid = -1;
- if (rc == 0)
+ if (rc == 0) {
+ wil->sta[cid].status = wil_sta_connected;
wil_link_on(wil);
+ } else {
+ wil->sta[cid].status = wil_sta_unused;
+ }
}
int wil_priv_init(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
+ memset(wil->sta, 0, sizeof(wil->sta));
+
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
@@ -132,6 +234,7 @@ int wil_priv_init(struct wil6210_priv *wil)
INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+ INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
spin_lock_init(&wil->wmi_ev_lock);
@@ -158,7 +261,10 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
void wil_priv_deinit(struct wil6210_priv *wil)
{
cancel_work_sync(&wil->disconnect_worker);
+ cancel_work_sync(&wil->fw_error_worker);
+ mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL);
+ mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
destroy_workqueue(wil->wmi_wq_conn);
destroy_workqueue(wil->wmi_wq);
@@ -166,40 +272,78 @@ void wil_priv_deinit(struct wil6210_priv *wil)
static void wil_target_reset(struct wil6210_priv *wil)
{
+ int delay = 0;
+ u32 hw_state;
+ u32 rev_id;
+
wil_dbg_misc(wil, "Resetting...\n");
+ /* register read */
+#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write */
#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
/* register set = read, OR, write */
-#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
- wil->csr + HOSTADDR(a))
+#define S(a, v) W(a, R(a) | v)
+ /* register clear = read, AND with inverted, write */
+#define C(a, v) W(a, R(a) & ~v)
+ wil->hw_version = R(RGF_USER_FW_REV_ID);
+ rev_id = wil->hw_version & 0xff;
/* hpal_perst_from_pad_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
/* car_perst_rst_src_n_mask */
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+ wmb(); /* order is important here */
W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wmb(); /* order is important here */
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
- W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ if (rev_id == 1) {
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ } else {
+ W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+ }
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+ wmb(); /* order is important here */
+
+ /* wait until device ready */
+ do {
+ msleep(1);
+ hw_state = R(RGF_USER_HW_MACHINE_STATE);
+ if (delay++ > 100) {
+ wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+ hw_state);
+ return;
+ }
+ } while (hw_state != HW_MACHINE_BOOT_DONE);
+
+ if (rev_id == 2)
+ W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+
+ C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+ wmb(); /* order is important here */
- wil_dbg_misc(wil, "Reset completed\n");
+ wil_dbg_misc(wil, "Reset completed in %d ms\n", delay);
+#undef R
#undef W
#undef S
+#undef C
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -234,11 +378,24 @@ int wil_reset(struct wil6210_priv *wil)
{
int rc;
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL);
+ wil->status = 0; /* prevent NAPI from being scheduled */
+ if (test_bit(wil_status_napi_en, &wil->status)) {
+ napi_synchronize(&wil->napi_rx);
+ }
+
+ if (wil->scan_request) {
+ wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+ wil->scan_request);
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
wil6210_disable_irq(wil);
- wil->status = 0;
wmi_event_flush(wil);
@@ -248,6 +405,8 @@ int wil_reset(struct wil6210_priv *wil)
/* TODO: put MAC in reset */
wil_target_reset(wil);
+ wil_rx_fini(wil);
+
/* init after reset */
wil->pending_connect_cid = -1;
reinit_completion(&wil->wmi_ready);
@@ -261,6 +420,11 @@ int wil_reset(struct wil6210_priv *wil)
return rc;
}
+void wil_fw_error_recovery(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "starting fw error recovery\n");
+ schedule_work(&wil->fw_error_worker);
+}
void wil_link_on(struct wil6210_priv *wil)
{
@@ -288,6 +452,8 @@ static int __wil_up(struct wil6210_priv *wil)
struct wireless_dev *wdev = wil->wdev;
int rc;
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
rc = wil_reset(wil);
if (rc)
return rc;
@@ -329,6 +495,7 @@ static int __wil_up(struct wil6210_priv *wil)
napi_enable(&wil->napi_rx);
napi_enable(&wil->napi_tx);
+ set_bit(wil_status_napi_en, &wil->status);
return 0;
}
@@ -346,6 +513,9 @@ int wil_up(struct wil6210_priv *wil)
static int __wil_down(struct wil6210_priv *wil)
{
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
+ clear_bit(wil_status_napi_en, &wil->status);
napi_disable(&wil->napi_rx);
napi_disable(&wil->napi_tx);
@@ -370,3 +540,19 @@ int wil_down(struct wil6210_priv *wil)
return rc;
}
+
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
+{
+ int i;
+ int rc = -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if ((wil->sta[i].status != wil_sta_unused) &&
+ ether_addr_equal(wil->sta[i].addr, mac)) {
+ rc = i;
+ break;
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 717178f09aa8..fdcaeb820e75 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,8 +127,9 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ndev->netdev_ops = &wil_netdev_ops;
ndev->ieee80211_ptr = wdev;
- ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
- ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GRO;
+ ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eeceab39cda2..f1e1bb338d68 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -41,39 +41,41 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
switch (use_msi) {
case 3:
case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ break;
case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n",
- use_msi);
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
use_msi = 1;
}
- wil->n_msi = use_msi;
- if (wil->n_msi) {
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- if (rc && (wil->n_msi == 3)) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- wil->n_msi = 1;
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- }
- if (rc) {
- wil_err(wil, "pci_enable_msi failed, use INTx\n");
- wil->n_msi = 0;
- }
- } else {
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+
+ if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ use_msi = 1;
+ }
+
+ if (use_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ use_msi = 0;
}
+ wil->n_msi = use_msi;
+
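/*
 * Editor's note: the rewritten ladder above asks for exactly three MSI
 * vectors via pci_enable_msi_range(pdev, 3, 3), falls back to a single
 * vector with pci_enable_msi(), and finally to legacy INTx when both
 * fail; wil->n_msi records whichever mode was actually enabled.
 */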
rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
goto stop_master;
/* need reset here to obtain MAC */
+ mutex_lock(&wil->mutex);
rc = wil_reset(wil);
+ mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
+ wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
+
return 0;
release_irq:
@@ -151,6 +153,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, wil);
wil->pdev = pdev;
+ wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
rc = wil_if_pcie_enable(wil);
if (rc) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644
index 000000000000..d04629fe053f
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -0,0 +1,177 @@
+#include "wil6210.h"
+#include "txrx.h"
+
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+ return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+ return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+ return (sq1 - sq2) & SEQ_MASK;
+}
+
+static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
+{
+ return seq_sub(seq, r->ssn) % r->buf_size;
+}
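These helpers implement 12-bit (modulo-4096) sequence-number arithmetic, so comparisons stay correct across the wrap at 0xfff, and reorder_index() maps a sequence number onto a slot of the circular reorder buffer. A small illustration (values chosen only for the example):

/* seq_less(0xffe, 0x001) is true: (0xffe - 0x001) & 0xfff = 0xffd > 0x800,
 * i.e. 0xffe logically precedes 0x001 across the wrap.
 * seq_sub(0x001, 0xffe) = 3: the two frames are three sequence numbers apart.
 * With r->ssn = 0xffe and r->buf_size = 16, reorder_index(r, 0x001) = 3.
 */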
+
+static void wil_release_reorder_frame(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ int index)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct sk_buff *skb = r->reorder_buf[index];
+
+ if (!skb)
+ goto no_frame;
+
+ /* release the frame from the reorder ring buffer */
+ r->stored_mpdu_num--;
+ r->reorder_buf[index] = NULL;
+ wil_netif_rx_any(skb, ndev);
+
+no_frame:
+ r->head_seq_num = seq_inc(r->head_seq_num);
+}
+
+static void wil_release_reorder_frames(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r,
+ u16 hseq)
+{
+ int index;
+
+ while (seq_less(r->head_seq_num, hseq)) {
+ index = reorder_index(r, r->head_seq_num);
+ wil_release_reorder_frame(wil, r, index);
+ }
+}
+
+static void wil_reorder_release(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ int index = reorder_index(r, r->head_seq_num);
+
+ while (r->reorder_buf[index]) {
+ wil_release_reorder_frame(wil, r, index);
+ index = reorder_index(r, r->head_seq_num);
+ }
+}
+
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int tid = wil_rxdesc_tid(d);
+ int cid = wil_rxdesc_cid(d);
+ int mid = wil_rxdesc_mid(d);
+ u16 seq = wil_rxdesc_seq(d);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[tid];
+ u16 hseq;
+ int index;
+
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
+ mid, cid, tid, seq);
+
+ if (!r) {
+ wil_netif_rx_any(skb, ndev);
+ return;
+ }
+
+ hseq = r->head_seq_num;
+
+ spin_lock(&r->reorder_lock);
+
+ /* frame with out of date sequence number */
+ if (seq_less(seq, r->head_seq_num)) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the frame sequence number exceeds our buffering window
+ * size, release some previous frames to make room for this one.
+ */
+ if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
+ hseq = seq_inc(seq_sub(seq, r->buf_size));
+ /* release stored frames up to new head to stack */
+ wil_release_reorder_frames(wil, r, hseq);
+ }
+
+ /* Now the new frame is always in the range of the reordering buffer */
+
+ index = reorder_index(r, seq);
+
+ /* check if we already stored this frame */
+ if (r->reorder_buf[index]) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+ * If the current MPDU is in the right order and nothing else
+ * is stored we can process it directly, no need to buffer it.
+ * If it is first but there's something stored, we may be able
+ * to release frames after this one.
+ */
+ if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
+ r->head_seq_num = seq_inc(r->head_seq_num);
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+
+ /* put the frame in the reordering buffer */
+ r->reorder_buf[index] = skb;
+ r->reorder_time[index] = jiffies;
+ r->stored_mpdu_num++;
+ wil_reorder_release(wil, r);
+
+out:
+ spin_unlock(&r->reorder_lock);
+}
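The flow above follows the familiar mac80211 reorder scheme: stale frames are dropped, frames beyond the window first flush older entries, duplicates are dropped, the in-order case is delivered immediately, and anything else is buffered until the hole is filled. A worked example (numbers are illustrative only):

/* head_seq_num = 0x100, buf_size = 16:
 *   seq 0x0ff -> older than head, dropped as stale
 *   seq 0x100 with an empty buffer -> delivered immediately,
 *                                     head advances to 0x101
 *   seq 0x103 -> stored in the reorder buffer until 0x101/0x102 arrive
 *   seq 0x111 -> beyond head + buf_size, so the head is advanced first
 *                (releasing stored frames in order) to make room
 */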
+
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn)
+{
+ struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->reorder_buf =
+ kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
+ r->reorder_time =
+ kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
+ if (!r->reorder_buf || !r->reorder_time) {
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+ return NULL;
+ }
+
+ spin_lock_init(&r->reorder_lock);
+ r->ssn = ssn;
+ r->head_seq_num = ssn;
+ r->buf_size = size;
+ r->stored_mpdu_num = 0;
+ return r;
+}
+
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r)
+{
+ if (!r)
+ return;
+ wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
+ kfree(r->reorder_buf);
+ kfree(r->reorder_time);
+ kfree(r);
+}
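Allocation and teardown of a per-TID reorder context are symmetric; the BA-status handler added to wmi.c later in this patch uses them roughly like this (sketch only; agg_wsize, ssn and tid stand for the values used in that handler):

struct wil_tid_ampdu_rx *r;

r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
if (r)
	sta->tid_rx[tid] = r;	/* Rx path starts reordering for this TID */

/* ... on BA teardown or disconnect: */
wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
sta->tid_rx[tid] = NULL;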
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 0b0975d88b43..c8c547457eb4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return 0;
}
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+ struct wil_ctx *ctx)
+{
+ dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+ u16 dmalen = le16_to_cpu(d->dma.length);
+ switch (ctx->mapped_as) {
+ case wil_mapped_as_single:
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ case wil_mapped_as_page:
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
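The helper relies on the mapped_as tag recorded when the buffer was mapped, so one routine can undo both head and fragment mappings. The pairing in the Tx path below looks like this (sketch of the relevant lines):

pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
	return -EINVAL;
vring->ctx[i].mapped_as = wil_mapped_as_single;
/* ... on completion, or when unwinding after a later mapping error: */
wil_txdesc_unmap(dev, d, &vring->ctx[i]);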
+
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
int tx)
{
@@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
ctx = &vring->ctx[vring->swtail];
*d = *_d;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[vring->swtail].mapped_as_page) {
- dma_unmap_page(dev, pa, dmalen,
- DMA_TO_DEVICE);
- } else {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- }
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
@@ -344,6 +353,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
u16 dmalen;
u8 ftype;
u8 ds_bits;
+ int cid;
+ struct wil_net_stats *stats;
+
BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
@@ -383,8 +395,10 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
-
- wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+ cid = wil_rxdesc_cid(d);
+ stats = &wil->sta[cid].stats;
+ stats->last_mcs_rx = wil_rxdesc_mcs(d);
+ wil->stats.last_mcs_rx = stats->last_mcs_rx;
/* use radiotap header only if required */
if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
@@ -472,21 +486,28 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
*/
-static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
- int rc;
+ gro_result_t rc;
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
unsigned int len = skb->len;
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_rxdesc_cid(d);
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
skb_orphan(skb);
- rc = netif_receive_skb(skb);
+ rc = napi_gro_receive(&wil->napi_rx, skb);
- if (likely(rc == NET_RX_SUCCESS)) {
+ if (unlikely(rc == GRO_DROP)) {
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ } else {
ndev->stats.rx_packets++;
+ stats->rx_packets++;
ndev->stats.rx_bytes += len;
-
- } else {
- ndev->stats.rx_dropped++;
+ stats->rx_bytes += len;
}
}
@@ -515,12 +536,18 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
-
+ wil_netif_rx_any(skb, ndev);
} else {
+ struct ethhdr *eth = (void *)skb->data;
+
skb->protocol = eth_type_trans(skb, ndev);
+
+ if (is_unicast_ether_addr(eth->h_dest))
+ wil_rx_reorder(wil, skb);
+ else
+ wil_netif_rx_any(skb, ndev);
}
- wil_netif_rx_any(skb, ndev);
}
wil_rx_refill(wil, v->size);
}
@@ -530,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
struct vring *vring = &wil->vring_rx;
int rc;
+ if (vring->va) {
+ wil_err(wil, "Rx ring already allocated\n");
+ return -EINVAL;
+ }
+
vring->size = WIL6210_RX_RING_SIZE;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -570,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
.ring_size = cpu_to_le16(size),
},
.ringid = id,
- .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .cidxtid = mk_cidxtid(cid, tid),
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.mac_ctrl = 0,
.to_resolution = 0,
@@ -586,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[id];
if (vring->va) {
wil_err(wil, "Tx ring [%d] already allocated\n", id);
@@ -593,11 +626,15 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
goto out;
}
+ memset(txdata, 0, sizeof(*txdata));
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
goto out;
+ wil->vring2cid_tid[id][0] = cid;
+ wil->vring2cid_tid[id][1] = tid;
+
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
@@ -613,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
}
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ txdata->enabled = 1;
+
return 0;
out_free:
wil_vring_free(wil, vring, 1);
@@ -625,23 +664,116 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
struct vring *vring = &wil->vring_tx[id];
+ WARN_ON(!mutex_is_locked(&wil->mutex));
+
if (!vring->va)
return;
+ /* make sure NAPI won't touch this vring */
+ wil->vring_tx_data[id].enabled = 0;
+ if (test_bit(wil_status_napi_en, &wil->status))
+ napi_synchronize(&wil->napi_tx);
+
wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
struct sk_buff *skb)
{
- struct vring *v = &wil->vring_tx[0];
+ int i;
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil_find_cid(wil, eth->h_dest);
+
+ if (cid < 0)
+ return NULL;
- if (v->va)
- return v;
+ if (!wil->sta[cid].data_port_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ return NULL;
+
+ /* TODO: fix for multiple TID */
+ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if (wil->vring2cid_tid[i][0] == cid) {
+ struct vring *v = &wil->vring_tx[i];
+ wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
+ __func__, eth->h_dest, i);
+ if (v->va) {
+ return v;
+ } else {
+ wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ return NULL;
+ }
+ }
+ }
return NULL;
}
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb, int vring_index)
+{
+ struct ethhdr *eth = (void *)skb->data;
+ int cid = wil->vring2cid_tid[vring_index][0];
+ memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb);
+/*
+ * Find the first vring and return it; set the destination address for this
+ * vring in the skb, then duplicate the skb and send it to the other active vrings
+ */
+static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v, *v2;
+ struct sk_buff *skb2;
+ int i;
+ u8 cid;
+
+ /* find the first vring eligible for data */
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ v = &wil->vring_tx[i];
+ if (!v->va)
+ continue;
+
+ cid = wil->vring2cid_tid[i][0];
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ goto found;
+ }
+
+ wil_err(wil, "Tx while no vrings active?\n");
+
+ return NULL;
+
+found:
+ wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb, i);
+
+ /* find other active vrings and duplicate skb for each */
+ for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
+ v2 = &wil->vring_tx[i];
+ if (!v2->va)
+ continue;
+ cid = wil->vring2cid_tid[i][0];
+ if (!wil->sta[cid].data_port_open)
+ continue;
+
+ skb2 = skb_copy(skb, GFP_ATOMIC);
+ if (skb2) {
+ wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb2, i);
+ wil_tx_vring(wil, v2, skb2);
+ } else {
+ wil_err(wil, "skb_copy failed\n");
+ }
+ }
+
+ return v;
+}
+
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -667,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
return 0;
}
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
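The hardware expects the total number of descriptors used by the MPDU, i.e. the skb head plus its page fragments; for example (illustrative):

/* an skb with 2 page fragments occupies 3 descriptors in total,
 * so wil_tx_desc_set_nr_frags(d, 2) encodes the value 3 into mac.d[2]
 */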
+
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
struct vring_tx_desc *d,
struct sk_buff *skb)
@@ -731,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "%s()\n", __func__);
- if (avail < vring->size/8)
- netif_tx_stop_all_queues(wil_to_ndev(wil));
if (avail < 1 + nr_frags) {
wil_err(wil, "Tx ring full. No space for %d fragments\n",
1 + nr_frags);
@@ -740,9 +877,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
}
_d = &(vring->va[i].tx);
- /* FIXME FW can accept only unicast frames for the peer */
- memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
-
pa = dma_map_single(dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
@@ -753,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
return -EINVAL;
+ vring->ctx[i].mapped_as = wil_mapped_as_single;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
/* Process TCP/UDP checksum offloading */
@@ -762,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
goto dma_error;
}
- d->mac.d[2] |= ((nr_frags + 1) <<
- MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ vring->ctx[i].nr_frags = nr_frags;
+ wil_tx_desc_set_nr_frags(d, nr_frags);
if (nr_frags)
*_d = *d;
@@ -778,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
+ vring->ctx[i].mapped_as = wil_mapped_as_page;
wil_tx_desc_map(d, pa, len, vring_index);
- vring->ctx[i].mapped_as_page = 1;
+ /* no need to check the return code -
+ * if it succeeded for the first descriptor,
+ * it will succeed here too
+ */
+ wil_tx_desc_offload_cksum_set(wil, d, skb);
*_d = *d;
}
/* for the last seg only */
@@ -808,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
/* unmap what we have mapped */
nr_frags = f + 1; /* frags mapped + one for skb head */
for (f = 0; f < nr_frags; f++) {
- u16 dmalen;
struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
@@ -816,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
- pa = wil_desc_addr(&d->dma.addr);
- dmalen = le16_to_cpu(d->dma.length);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+ wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
@@ -836,12 +970,17 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct ethhdr *eth = (void *)skb->data;
struct vring *vring;
+ static bool pr_once_fw;
int rc;
wil_dbg_txrx(wil, "%s()\n", __func__);
if (!test_bit(wil_status_fwready, &wil->status)) {
- wil_err(wil, "FW not ready\n");
+ if (!pr_once_fw) {
+ wil_err(wil, "FW not ready\n");
+ pr_once_fw = true;
+ }
goto drop;
}
if (!test_bit(wil_status_fwconnected, &wil->status)) {
@@ -852,16 +991,25 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wil_err(wil, "Xmit in monitor mode not supported\n");
goto drop;
}
+ pr_once_fw = false;
/* find vring */
- vring = wil_find_tx_vring(wil, skb);
+ if (is_unicast_ether_addr(eth->h_dest)) {
+ vring = wil_find_tx_vring(wil, skb);
+ } else {
+ vring = wil_tx_bcast(wil, skb);
+ }
if (!vring) {
- wil_err(wil, "No Tx VRING available\n");
+ wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
goto drop;
}
/* set up vring entry */
rc = wil_tx_vring(wil, vring, skb);
+ /* do we still have enough room in the vring? */
+ if (wil_vring_avail_tx(vring) < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+
switch (rc) {
case 0:
/* statistics will be updated on the tx_complete */
@@ -891,64 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
struct net_device *ndev = wil_to_ndev(wil);
struct device *dev = wil_to_dev(wil);
struct vring *vring = &wil->vring_tx[ringid];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
int done = 0;
+ int cid = wil->vring2cid_tid[ringid][0];
+ struct wil_net_stats *stats = &wil->sta[cid].stats;
+ volatile struct vring_tx_desc *_d;
if (!vring->va) {
wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
return 0;
}
+ if (!txdata->enabled) {
+ wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+ return 0;
+ }
+
wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
while (!wil_vring_is_empty(vring)) {
- volatile struct vring_tx_desc *_d =
- &vring->va[vring->swtail].tx;
- struct vring_tx_desc dd, *d = &dd;
- dma_addr_t pa;
- u16 dmalen;
+ int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
- struct sk_buff *skb = ctx->skb;
-
- *d = *_d;
+ /* For a fragmented skb, HW sets the DU bit only on the
+ * last fragment; look for it.
+ */
+ int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+ /* TODO: check we are not past head */
- if (!(d->dma.status & TX_DMA_STATUS_DU))
+ _d = &vring->va[lf].tx;
+ if (!(_d->dma.status & TX_DMA_STATUS_DU))
break;
- dmalen = le16_to_cpu(d->dma.length);
- trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
- d->dma.error);
- wil_dbg_txrx(wil,
- "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
- vring->swtail, dmalen, d->dma.status,
- d->dma.error);
- wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
- (const void *)d, sizeof(*d), false);
-
- pa = wil_desc_addr(&d->dma.addr);
- if (ctx->mapped_as_page)
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
- else
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-
- if (skb) {
- if (d->dma.error == 0) {
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- } else {
- ndev->stats.tx_errors++;
- }
+ new_swtail = (lf + 1) % vring->size;
+ while (vring->swtail != new_swtail) {
+ struct vring_tx_desc dd, *d = &dd;
+ u16 dmalen;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ struct sk_buff *skb = ctx->skb;
+ _d = &vring->va[vring->swtail].tx;
+
+ *d = *_d;
+
+ dmalen = le16_to_cpu(d->dma.length);
+ trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+ d->dma.error);
+ wil_dbg_txrx(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, dmalen, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_txdesc_unmap(dev, d, ctx);
+
+ if (skb) {
+ if (d->dma.error == 0) {
+ ndev->stats.tx_packets++;
+ stats->tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ stats->tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ stats->tx_errors++;
+ }
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
+ }
+ memset(ctx, 0, sizeof(*ctx));
+ /* There is no need to touch HW descriptor:
+ * - status bit TX_DMA_STATUS_DU is set by design,
+ * so hardware will not try to process this descriptor,
+ * - the rest of the descriptor will be initialized on Tx.
+ */
+ vring->swtail = wil_vring_next_tail(vring);
+ done++;
}
- memset(ctx, 0, sizeof(*ctx));
- /*
- * There is no need to touch HW descriptor:
- * - ststus bit TX_DMA_STATUS_DU is set by design,
- * so hardware will not try to process this desc.,
- * - rest of descriptor will be initialized on Tx.
- */
- vring->swtail = wil_vring_next_tail(vring);
- done++;
}
if (wil_vring_avail_tx(vring) > vring->size/4)
netif_tx_wake_all_queues(wil_to_ndev(wil));
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index b3828279204c..bc5706a4f007 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -436,4 +436,11 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+ int size, u16 ssn);
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+ struct wil_tid_ampdu_rx *r);
+
#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1f91eaf95bbe..2a2dec75f026 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -74,23 +74,21 @@ struct RGF_ICR {
} __packed;
/* registers - FW addresses */
-#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
-#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
- #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
-#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
-#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
+ #define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */
+#define RGF_USER_CLKS_CTL_0 (0x880abc)
+ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */
#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
-
-#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
- #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
- #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
- #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
#define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
@@ -105,13 +103,22 @@ struct RGF_ICR {
/* Interrupt moderation control */
#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
#define RGF_DMA_ITR_CNT_DATA (0x881c60)
-#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+#define RGF_DMA_ITR_CNT_CRL (0x881c64)
#define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
#define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
#define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
#define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
#define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4)
+
/* popular locations */
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -125,6 +132,31 @@ struct RGF_ICR {
/* Hardware definitions end */
+/**
+ * mk_cidxtid - construct @cidxtid field
+ * @cid: CID value
+ * @tid: TID value
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline u8 mk_cidxtid(u8 cid, u8 tid)
+{
+ return ((tid & 0xf) << 4) | (cid & 0xf);
+}
+
+/**
+ * parse_cidxtid - parse @cidxtid field
+ * @cidxtid: encoded CID/TID byte
+ * @cid: store CID value here
+ * @tid: store TID value here
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
+{
+ *cid = cidxtid & 0xf;
+ *tid = (cidxtid >> 4) & 0xf;
+}
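mk_cidxtid() and parse_cidxtid() are inverses over the low four bits of each field; a small round-trip check (illustrative values):

u8 cid, tid;
u8 v = mk_cidxtid(3, 5);	/* v == 0x53: TID in the high nibble, CID in the low */
parse_cidxtid(v, &cid, &tid);	/* cid == 3, tid == 5 */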
+
struct wil6210_mbox_ring {
u32 base;
u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -184,12 +216,19 @@ struct pending_wmi_event {
} __packed event;
};
+enum { /* for wil_ctx.mapped_as */
+ wil_mapped_as_none = 0,
+ wil_mapped_as_single = 1,
+ wil_mapped_as_page = 2,
+};
+
/**
* struct wil_ctx - software context for Vring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
- u8 mapped_as_page:1;
+ u8 nr_frags;
+ u8 mapped_as;
};
union vring_desc;
@@ -204,6 +243,14 @@ struct vring {
struct wil_ctx *ctx; /* ctx[size] - software context */
};
+/**
+ * Additional data for Tx Vring
+ */
+struct vring_tx_data {
+ int enabled;
+};
+
enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
wil_status_fwconnecting,
@@ -211,10 +258,51 @@ enum { /* for wil6210_priv.status */
wil_status_dontscan,
wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+ wil_status_napi_en, /* NAPI enabled, protected by wil->mutex */
};
struct pci_dev;
+/**
+ * struct tid_ampdu_rx - TID aggregation information (Rx).
+ *
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @reorder_time: jiffies when skb was added
+ * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
+ * @head_seq_num: head sequence number in reordering buffer.
+ * @stored_mpdu_num: number of MPDUs in reordering buffer
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @timeout: reset timer value (in TUs).
+ * @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ * @reorder_lock: serializes access to reorder buffer, see below.
+ *
+ * This structure's lifetime is managed by RCU, assignments to
+ * the array holding it must hold the aggregation mutex.
+ *
+ * The @reorder_lock is used to protect the members of this
+ * struct, except for @timeout, @buf_size and @dialog_token,
+ * which are constant across the lifetime of the struct (the
+ * dialog token being used only for debugging).
+ */
+struct wil_tid_ampdu_rx {
+ spinlock_t reorder_lock; /* see above */
+ struct sk_buff **reorder_buf;
+ unsigned long *reorder_time;
+ struct timer_list session_timer;
+ struct timer_list reorder_timer;
+ unsigned long last_rx;
+ u16 head_seq_num;
+ u16 stored_mpdu_num;
+ u16 ssn;
+ u16 buf_size;
+ u16 timeout;
+ u8 dialog_token;
+};
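As the comment above states, all mutable members are accessed only under @reorder_lock; the Rx reorder path in rx_reorder.c follows the usual pattern:

spin_lock(&r->reorder_lock);
/* inspect/update r->head_seq_num, r->reorder_buf[], r->stored_mpdu_num ... */
spin_unlock(&r->reorder_lock);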
+
struct wil6210_stats {
u64 tsf;
u32 snr;
@@ -226,6 +314,43 @@ struct wil6210_stats {
u16 peer_tx_sector;
};
+enum wil_sta_status {
+ wil_sta_unused = 0,
+ wil_sta_conn_pending = 1,
+ wil_sta_connected = 2,
+};
+
+#define WIL_STA_TID_NUM (16)
+
+struct wil_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_errors;
+ unsigned long rx_dropped;
+ u16 last_mcs_rx;
+};
+
+/**
+ * struct wil_sta_info - data for peer
+ *
+ * A peer is identified by its CID (connection ID).
+ * The NIC performs beamforming for each peer; without
+ * beamforming, frame exchange is not possible.
+ */
+struct wil_sta_info {
+ u8 addr[ETH_ALEN];
+ enum wil_sta_status status;
+ struct wil_net_stats stats;
+ bool data_port_open; /* can send any data, not only EAPOL */
+ /* Rx BACK */
+ struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@@ -233,6 +358,7 @@ struct wil6210_priv {
void __iomem *csr;
ulong status;
u32 fw_version;
+ u32 hw_version;
u8 n_mids; /* number of additional MIDs as reported by FW */
/* profile */
u32 monitor_flags;
@@ -253,6 +379,7 @@ struct wil6210_priv {
struct workqueue_struct *wmi_wq_conn; /* for connect worker */
struct work_struct connect_worker;
struct work_struct disconnect_worker;
+ struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
int pending_connect_cid;
struct list_head pending_wmi_ev;
@@ -267,7 +394,9 @@ struct wil6210_priv {
/* DMA related */
struct vring vring_rx;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
- u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
+ u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+ struct wil_sta_info sta[WIL6210_MAX_CID];
/* scan */
struct cfg80211_scan_request *scan_request;
@@ -329,11 +458,13 @@ void wil_if_remove(struct wil6210_priv *wil);
int wil_priv_init(struct wil6210_priv *wil);
void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil);
+void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_link_on(struct wil6210_priv *wil);
void wil_link_off(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
@@ -357,8 +488,11 @@ int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
+int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
+void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
void wil6210_disable_irq(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 063963ee422a..2ba56eef0c45 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -307,14 +307,14 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
u32 freq = ieee80211_channel_to_frequency(ch_no,
IEEE80211_BAND_60GHZ);
struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
- /* TODO convert LE to CPU */
- s32 signal = 0; /* TODO */
+ s32 signal = data->info.sqi;
__le16 fc = rx_mgmt_frame->frame_control;
u32 d_len = le32_to_cpu(data->info.len);
u16 d_status = le16_to_cpu(data->info.status);
- wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
- data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+ data->info.channel, data->info.mcs, data->info.snr,
+ data->info.sqi);
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@@ -384,6 +384,11 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
evt->assoc_req_len, evt->assoc_resp_len);
return;
}
+ if (evt->cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
+ return;
+ }
+
ch = evt->channel + 1;
wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
evt->bssid, ch, evt->cid);
@@ -439,7 +444,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
- memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+ memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+ wil->sta[evt->cid].status = wil_sta_conn_pending;
wil->pending_connect_cid = evt->cid;
queue_work(wil->wmi_wq_conn, &wil->connect_worker);
@@ -456,7 +462,9 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
wil->sinfo_gen++;
+ mutex_lock(&wil->mutex);
wil6210_disconnect(wil, evt->bssid);
+ mutex_unlock(&wil->mutex);
}
static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,11 +484,11 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
- "BF status 0x%08x SNR 0x%08x\n"
+ "BF status 0x%08x SNR 0x%08x SQI %d%%\n"
"Tx Tpt %d goodput %d Rx goodput %d\n"
"Sectors(rx:tx) my %d:%d peer %d:%d\n",
wil->stats.bf_mcs, wil->stats.tsf, evt->status,
- wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt),
le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
wil->stats.my_rx_sector, wil->stats.my_tx_sector,
wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
@@ -499,10 +507,16 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
int sz = eapol_len + ETH_HLEN;
struct sk_buff *skb;
struct ethhdr *eth;
+ int cid;
+ struct wil_net_stats *stats = NULL;
wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
evt->src_mac);
+ cid = wil_find_cid(wil, evt->src_mac);
+ if (cid >= 0)
+ stats = &wil->sta[cid].stats;
+
if (eapol_len > 196) { /* TODO: revisit size limit */
wil_err(wil, "EAPOL too large\n");
return;
@@ -513,6 +527,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
wil_err(wil, "Failed to allocate skb\n");
return;
}
+
eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
@@ -521,9 +536,15 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
skb->protocol = eth_type_trans(skb, ndev);
if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
+ ndev->stats.rx_bytes += sz;
+ if (stats) {
+ stats->rx_packets++;
+ stats->rx_bytes += sz;
+ }
} else {
ndev->stats.rx_dropped++;
+ if (stats)
+ stats->rx_dropped++;
}
}
@@ -531,9 +552,16 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_data_port_open_event *evt = d;
+ u8 cid = evt->cid;
- wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
+ wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link UP for invalid CID %d\n", cid);
+ return;
+ }
+
+ wil->sta[cid].data_port_open = true;
netif_carrier_on(ndev);
}
@@ -541,10 +569,17 @@ static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
{
struct net_device *ndev = wil_to_ndev(wil);
struct wmi_wbe_link_down_event *evt = d;
+ u8 cid = evt->cid;
wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
- evt->cid, le32_to_cpu(evt->reason));
+ cid, le32_to_cpu(evt->reason));
+
+ if (cid >= ARRAY_SIZE(wil->sta)) {
+ wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+ return;
+ }
+ wil->sta[cid].data_port_open = false;
netif_carrier_off(ndev);
}
@@ -552,10 +587,42 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
int len)
{
struct wmi_vring_ba_status_event *evt = d;
+ struct wil_sta_info *sta;
+ uint i, cid;
+
+ /* TODO: use Rx BA status, not Tx one */
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
- evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
- __le16_to_cpu(evt->ba_timeout));
+ evt->ringid,
+ evt->status == WMI_BA_AGREED ? "OK" : "N/A",
+ evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));
+
+ if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
+ wil_err(wil, "invalid ring id %d\n", evt->ringid);
+ return;
+ }
+
+ cid = wil->vring2cid_tid[evt->ringid][0];
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status == wil_sta_unused) {
+ wil_err(wil, "CID %d unused\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
+ for (i = 0; i < WIL_STA_TID_NUM; i++) {
+ struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+ sta->tid_rx[i] = NULL;
+ wil_tid_ampdu_rx_free(wil, r);
+ if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
+ sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
+ evt->agg_wsize, 0);
+ }
}
static const struct {
@@ -893,6 +960,38 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
return rc;
}
+/**
+ * wmi_rxon - turn radio on/off
+ * @on: turn on if true, off otherwise
+ *
+ * Only switches the radio; the channel must be set separately.
+ * There is no timeout for rxon - the radio stays on until some other
+ * call turns it off.
+ */
+int wmi_rxon(struct wil6210_priv *wil, bool on)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply;
+
+ wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+
+ if (on) {
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID,
+ &reply, sizeof(reply), 100);
+ if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
+ rc = -EINVAL;
+ } else {
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+ }
+
+ return rc;
+}
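Usage is a plain on/off toggle; a hedged sketch of a caller (error handling trimmed, the actual call sites are not part of this hunk):

int rc = wmi_rxon(wil, true);	/* start listening on the configured channel */
if (rc)
	wil_err(wil, "wmi_rxon(on) failed: %d\n", rc);
/* ... later ... */
wmi_rxon(wil, false);		/* radio off */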
+
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
{
struct wireless_dev *wdev = wil->wdev;
@@ -906,6 +1005,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
},
.mid = 0, /* TODO - what is it? */
.decap_trans_type = WMI_DECAP_TYPE_802_3,
+ .reorder_type = WMI_RX_SW_REORDER,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
@@ -973,6 +1073,18 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
return 0;
}
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
+{
+ struct wmi_disconnect_sta_cmd cmd = {
+ .disconnect_reason = cpu_to_le16(reason),
+ };
+ memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+ wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+
+ return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+}
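Disconnecting a single peer takes only its MAC address and an IEEE 802.11 reason code; for example (reason code chosen for illustration):

wmi_disconnect_sta(wil, wil->sta[cid].addr, WLAN_REASON_DEAUTH_LEAVING);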
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bf93ea859f2d..1fe41af81a59 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -67,7 +67,7 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include "atmel.h"
#define DRIVER_MAJOR 0
@@ -2273,7 +2273,7 @@ static int atmel_set_freq(struct net_device *dev,
/* Hack to fall through... */
fwrq->e = 0;
- fwrq->m = ieee80211_freq_to_dsss_chan(f);
+ fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if ((fwrq->m > 1000) || (fwrq->e > 0))
@@ -2434,8 +2434,8 @@ static int atmel_get_range(struct net_device *dev,
range->freq[k].i = i; /* List index */
/* Values in MHz -> * 10^5 * 10 */
- range->freq[k].m = (ieee80211_dsss_chan_to_freq(i) *
- 100000);
+ range->freq[k].m = 100000 *
+ ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
range->freq[k++].e = 1;
}
range->num_frequency = k;
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 51ff0b198d0a..088d544ec63f 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -92,7 +92,7 @@ config B43_SDIO
# if we can do DMA.
config B43_BCMA_PIO
bool
- depends on B43_BCMA
+ depends on B43 && B43_BCMA
select BCMA_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 822aad8842f4..50517b801cb4 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -86,7 +86,7 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
{
- return 0;
+ return false;
}
static inline void b43_debugfs_init(void)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c75237eb55a1..69fc3d65531a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1549,7 +1549,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
- len = min((size_t) dev->wl->current_beacon->len,
+ len = min_t(size_t, dev->wl->current_beacon->len,
0x200 - sizeof(struct b43_plcp_hdr6));
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index abac25ee958d..f476fc337d64 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -58,41 +58,6 @@ enum b43_verbosity {
#endif
};
-
-/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
-static inline u8 b43_freq_to_channel_5ghz(int freq)
-{
- return ((freq - 5000) / 5);
-}
-static inline u8 b43_freq_to_channel_2ghz(int freq)
-{
- u8 channel;
-
- if (freq == 2484)
- channel = 14;
- else
- channel = (freq - 2407) / 5;
-
- return channel;
-}
-
-/* Lightweight function to convert a channel number to a frequency (in Mhz). */
-static inline int b43_channel_to_freq_5ghz(u8 channel)
-{
- return (5000 + (5 * channel));
-}
-static inline int b43_channel_to_freq_2ghz(u8 channel)
-{
- int freq;
-
- if (channel == 14)
- freq = 2484;
- else
- freq = 2407 + (5 * channel);
-
- return freq;
-}
-
static inline int b43_is_cck_rate(int rate)
{
return (rate == B43_CCK_RATE_1MB ||
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index f01676ac481b..dbaa51890198 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -133,9 +133,9 @@ void b43_phy_exit(struct b43_wldev *dev)
bool b43_has_hardware_pctl(struct b43_wldev *dev)
{
if (!dev->phy.hardware_power_control)
- return 0;
+ return false;
if (!dev->phy.ops->supports_hwpctl)
- return 0;
+ return false;
return dev->phy.ops->supports_hwpctl(dev);
}
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index a73ff8c9deb5..a4ff5e2a42b9 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -637,7 +637,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
- return 0;
+ return false;
b43_piorx_write32(q, B43_PIO8_RXCTL,
B43_PIO8_RXCTL_FRAMERDY);
for (i = 0; i < 10; i++) {
@@ -651,7 +651,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
- return 0;
+ return false;
b43_piorx_write16(q, B43_PIO_RXCTL,
B43_PIO_RXCTL_FRAMERDY);
for (i = 0; i < 10; i++) {
@@ -662,7 +662,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
}
}
b43dbg(q->dev->wl, "PIO RX timed out\n");
- return 1;
+ return true;
data_ready:
/* Get the preamble (RX header) */
@@ -759,7 +759,7 @@ data_ready:
b43_rx(q->dev, skb, rxhdr);
- return 1;
+ return true;
rx_error:
if (err_msg)
@@ -769,7 +769,7 @@ rx_error:
else
b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
- return 1;
+ return true;
}
void b43_pio_rx(struct b43_pio_rxqueue *q)
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 8e8431d4eb0c..3190493bd07f 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -40,7 +40,7 @@ static int get_integer(const char *buf, size_t count)
if (count == 0)
goto out;
- count = min(count, (size_t) 10);
+ count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
ret = simple_strtol(tmp, NULL, 10);
out:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 50e5ddb12fb3..31adb8cf0291 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -337,7 +337,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
/* iv16 */
memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3);
} else {
- iv_len = min((size_t) info->control.hw_key->iv_len,
+ iv_len = min_t(size_t, info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
}
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
- status.freq = b43_channel_to_freq_5ghz(chanid);
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
- if (chanstat & B43_RX_CHAN_5GHZ) {
+ if (chanstat & B43_RX_CHAN_5GHZ)
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_channel_to_freq_5ghz(chanid);
- } else {
+ else
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_channel_to_freq_2ghz(chanid);
- }
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
default:
B43_WARN_ON(1);
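The open-coded channel/frequency helpers removed from main.h are replaced by the generic ieee80211_channel_to_frequency(); for valid channels the results match the removed code, e.g.:

/* ieee80211_channel_to_frequency(1,  IEEE80211_BAND_2GHZ) == 2412 MHz
 * ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ) == 2484 MHz
 * ieee80211_channel_to_frequency(36, IEEE80211_BAND_5GHZ) == 5180 MHz
 */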
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 349c77605231..1aec2146a2bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -978,7 +978,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
- len = min((size_t)dev->wl->current_beacon->len,
+ len = min_t(size_t, dev->wl->current_beacon->len,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
@@ -1155,7 +1155,7 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
b43legacy_write_probe_resp_plcp(dev, 0x350, size,
&b43legacy_b_ratetable[3]);
- size = min((size_t)size,
+ size = min_t(size_t, size,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
b43legacy_write_template_common(dev, probe_resp_data,
size, ram_offset,
diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c
index 57f8b089767c..2a1da15c913b 100644
--- a/drivers/net/wireless/b43legacy/sysfs.c
+++ b/drivers/net/wireless/b43legacy/sysfs.c
@@ -42,7 +42,7 @@ static int get_integer(const char *buf, size_t count)
if (count == 0)
goto out;
- count = min(count, (size_t)10);
+ count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
ret = simple_strtol(tmp, NULL, 10);
out:
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 86588c9ff0f2..34bf3f0b729f 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -254,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
B43legacy_TX4_MAC_KEYALG_SHIFT) &
B43legacy_TX4_MAC_KEYALG;
wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
- iv_len = min((size_t)info->control.hw_key->iv_len,
+ iv_len = min_t(size_t, info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
} else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 57cddee03252..1d2ceac3a221 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ chip.o \
fwil.o \
fweh.o \
fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
- bcmsdh.o \
- sdio_chip.o
+ bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index fa35b23bbaa7..a16e644e7c08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,6 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
-#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -54,6 +53,12 @@
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
+#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
+#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
+
+static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
@@ -264,26 +269,17 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
break;
}
- if (ret) {
- /*
- * SleepCSR register access can fail when
- * waking up the device so reduce this noise
- * in the logs.
- */
- if (addr != SBSDIO_FUNC1_SLEEPCSR)
- brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
- else
- brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
- write ? "write" : "read", fn, addr, ret);
- }
+ if (ret)
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", fn, addr, ret);
+
return ret;
}
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 regsz, void *data, bool write)
{
- u8 func_num;
+ u8 func;
s32 retry = 0;
int ret;
@@ -297,9 +293,9 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
* The rest: function 1 silicon backplane core registers
*/
if ((addr & ~REG_F0_REG_MASK) == 0)
- func_num = SDIO_FUNC_0;
+ func = SDIO_FUNC_0;
else
- func_num = SDIO_FUNC_1;
+ func = SDIO_FUNC_1;
do {
if (!write)
@@ -307,16 +303,26 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
/* for retry wait for 1 ms till bus get settled down */
if (retry)
usleep_range(1000, 2000);
- ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+ ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
data, write);
} while (ret != 0 && ret != -ENOMEDIUM &&
retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
if (ret == -ENOMEDIUM)
brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
- else if (ret != 0)
- brcmf_err("failed with %d\n", ret);
-
+ else if (ret != 0) {
+ /*
+ * SleepCSR register access can fail when
+ * waking up the device so reduce this noise
+ * in the logs.
+ */
+ if (addr != SBSDIO_FUNC1_SLEEPCSR)
+ brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", func, addr, ret);
+ else
+ brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+ write ? "write" : "read", func, addr, ret);
+ }
return ret;
}
@@ -488,7 +494,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
- struct sg_table st;
struct scatterlist *sgl;
int ret = 0;
@@ -533,16 +538,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_offset = 0;
pkt_next = target_list->next;
- if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto exit;
- }
-
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data));
- mmc_dat.sg = st.sgl;
+ mmc_dat.sg = sdiodev->sgtable.sgl;
mmc_dat.blksz = func_blk_sz;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
@@ -558,7 +558,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
while (seg_sz) {
req_sz = 0;
sg_cnt = 0;
- sgl = st.sgl;
+ sgl = sdiodev->sgtable.sgl;
/* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset;
@@ -640,7 +640,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
}
exit:
- sg_free_table(&st);
+ sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
brcmu_pkt_buf_free_skb(pkt_next);
@@ -827,7 +827,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
}
if (!write)
memcpy(data, pkt->data, dsize);
- skb_trim(pkt, dsize);
+ skb_trim(pkt, 0);
/* Adjust for next transfer (if any) */
size -= dsize;
@@ -864,6 +864,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
return 0;
}
+static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+{
+ uint nents;
+ int err;
+
+ if (!sdiodev->sg_support)
+ return;
+
+ nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
+ nents += (nents >> 4) + 1;
+
+ WARN_ON(nents > sdiodev->max_segment_count);
+
+ brcmf_dbg(TRACE, "nents=%d\n", nents);
+ err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
+ if (err < 0) {
+ brcmf_err("allocation failed: disabling scatter-gather\n");
+ sdiodev->sg_support = false;
+ }
+
+ sdiodev->txglomsz = brcmf_sdiod_txglomsz;
+}
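The entry count is the larger of the default Rx glom size and the txglomsz module parameter, plus a small headroom of (nents >> 4) + 1 extra entries; with the default txglomsz of 32:

/* nents = max(32, 32) = 32;  32 + (32 >> 4) + 1 = 35 scatterlist entries */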
+
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
if (sdiodev->bus) {
@@ -881,6 +904,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sdio_disable_func(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
+ sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
return 0;
@@ -936,6 +960,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
SG_MAX_SINGLE_ALLOC);
sdiodev->max_segment_size = host->max_seg_size;
+ /* allocate scatter-gather table. sg support
+ * will be disabled upon allocation failure.
+ */
+ brcmf_sdiod_sgtable_alloc(sdiodev);
+
/* try to attach to the target device */
sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) {
@@ -960,6 +989,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1073,9 +1103,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
int ret = 0;
- brcmf_dbg(SDIO, "\n");
-
- atomic_set(&sdiodev->suspend, true);
+ brcmf_dbg(SDIO, "Enter\n");
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
@@ -1083,9 +1111,12 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
return -EINVAL;
}
+ atomic_set(&sdiodev->suspend, true);
+
ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
if (ret) {
brcmf_err("Failed to set pm_flags\n");
+ atomic_set(&sdiodev->suspend, false);
return ret;
}
@@ -1099,6 +1130,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ brcmf_dbg(SDIO, "Enter\n");
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -1115,14 +1147,15 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.remove = brcmf_ops_sdio_remove,
.name = BRCMFMAC_SDIO_PDATA_NAME,
.id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
.drv = {
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
.pm = &brcmf_sdio_pm_ops,
- },
#endif /* CONFIG_PM_SLEEP */
+ },
};
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
{
brcmf_dbg(SDIO, "Enter\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 000000000000..df130ef53d1c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1034 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "dhd_dbg.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK 0x0000000F
+#define DMP_DESC_EMPTY 0x00000000
+#define DMP_DESC_VALID 0x00000001
+#define DMP_DESC_COMPONENT 0x00000001
+#define DMP_DESC_MASTER_PORT 0x00000003
+#define DMP_DESC_ADDRESS 0x00000005
+#define DMP_DESC_ADDRSIZE_GT32 0x00000008
+#define DMP_DESC_EOT 0x0000000F
+
+#define DMP_COMP_DESIGNER 0xFFF00000
+#define DMP_COMP_DESIGNER_S 20
+#define DMP_COMP_PARTNUM 0x000FFF00
+#define DMP_COMP_PARTNUM_S 8
+#define DMP_COMP_CLASS 0x000000F0
+#define DMP_COMP_CLASS_S 4
+#define DMP_COMP_REVISION 0xFF000000
+#define DMP_COMP_REVISION_S 24
+#define DMP_COMP_NUM_SWRAP 0x00F80000
+#define DMP_COMP_NUM_SWRAP_S 19
+#define DMP_COMP_NUM_MWRAP 0x0007C000
+#define DMP_COMP_NUM_MWRAP_S 14
+#define DMP_COMP_NUM_SPORT 0x00003E00
+#define DMP_COMP_NUM_SPORT_S 9
+#define DMP_COMP_NUM_MPORT 0x000001F0
+#define DMP_COMP_NUM_MPORT_S 4
+
+#define DMP_MASTER_PORT_UID 0x0000FF00
+#define DMP_MASTER_PORT_UID_S 8
+#define DMP_MASTER_PORT_NUM 0x000000F0
+#define DMP_MASTER_PORT_NUM_S 4
+
+#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S 12
+#define DMP_SLAVE_PORT_NUM 0x00000F00
+#define DMP_SLAVE_PORT_NUM_S 8
+#define DMP_SLAVE_TYPE 0x000000C0
+#define DMP_SLAVE_TYPE_S 6
+#define DMP_SLAVE_TYPE_SLAVE 0
+#define DMP_SLAVE_TYPE_BRIDGE 1
+#define DMP_SLAVE_TYPE_SWRAP 2
+#define DMP_SLAVE_TYPE_MWRAP 3
+#define DMP_SLAVE_SIZE_TYPE 0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S 4
+#define DMP_SLAVE_SIZE_4K 0
+#define DMP_SLAVE_SIZE_8K 1
+#define DMP_SLAVE_SIZE_16K 2
+#define DMP_SLAVE_SIZE_DESC 3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+/* bcm43143 */
+/* SDIO device core */
+#define BCM43143_CORE_BUS_BASE 0x18002000
+/* internal memory core */
+#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM43143_CORE_ARM_BASE 0x18003000
+#define BCM43143_RAMSIZE 0x70000
+
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+struct brcmf_core_priv {
+ struct brcmf_core pub;
+ u32 wrapbase;
+ struct list_head list;
+ struct brcmf_chip_priv *chip;
+};
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+struct brcmf_chip_priv {
+ struct brcmf_chip pub;
+ const struct brcmf_buscore_ops *ops;
+ void *ctx;
+ /* assured first core is chipcommon, second core is buscore */
+ struct list_head cores;
+ u16 num_cores;
+
+ bool (*iscoreup)(struct brcmf_core_priv *core);
+ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset);
+ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+ u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+ struct brcmf_core *core)
+{
+ u32 regdata;
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+ core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 address;
+
+ ci = core->chip;
+ address = CORE_SB(core->pub.base, sbtmstatelow);
+ regdata = ci->ops->read32(ci->ctx, address);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ bool ret;
+
+ ci = core->chip;
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 val, base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if (val & SSB_TMSLOW_RESET)
+ return;
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if ((val & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ val | SSB_TMSLOW_REJECT);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+ & SSB_TMSHIGH_BUSY), 100000);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (val & SSB_TMSHIGH_BUSY)
+ brcmf_err("core state still busy\n");
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val |= SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate)) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val &= ~SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+
+ ci = core->chip;
+
+ /* if core is already in reset, just return */
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ /* configure reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ /* put in reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET);
+ usleep_range(10, 20);
+
+ /* wait till reset is 1 */
+ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* in-reset configure */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_chip_sb_coredisable(core, 0, 0);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_RESET);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* clear any serror */
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (regdata & SSB_TMSHIGH_SERR)
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* leave clock enabled */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ int count;
+
+ ci = core->chip;
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_chip_ai_coredisable(core, prereset, reset);
+
+ count = 0;
+ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+ u16 coreid, u32 base,
+ u32 wrapbase)
+{
+ struct brcmf_core_priv *core;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return ERR_PTR(-ENOMEM);
+
+ core->pub.id = coreid;
+ core->pub.base = base;
+ core->chip = ci;
+ core->wrapbase = wrapbase;
+
+ list_add_tail(&core->list, &ci->cores);
+ return &core->pub;
+}
+
+#ifdef DEBUG
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core_priv *core;
+ bool need_socram = false;
+ bool has_socram = false;
+ int idx = 1;
+
+ list_for_each_entry(core, &ci->cores, list) {
+ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+ idx++, core->pub.id, core->pub.rev, core->pub.base,
+ core->wrapbase);
+
+ switch (core->pub.id) {
+ case BCMA_CORE_ARM_CM3:
+ need_socram = true;
+ break;
+ case BCMA_CORE_INTERNAL_MEM:
+ has_socram = true;
+ break;
+ case BCMA_CORE_ARM_CR4:
+ if (ci->pub.rambase == 0) {
+ brcmf_err("RAM base not provided with ARM CR4 core\n");
+ return -ENOMEM;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check RAM core presence for ARM CM3 core */
+ if (need_socram && !has_socram) {
+ brcmf_err("RAM core not provided with ARM CM3 core\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+#else /* DEBUG */
+static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ return 0;
+}
+#endif
+
+static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+ switch (ci->pub.chip) {
+ case BCM4329_CHIP_ID:
+ ci->pub.ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM43143_CHIP_ID:
+ ci->pub.ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->pub.ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->pub.ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->pub.ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->pub.ramsize = 0x3c000;
+ break;
+ case BCM4339_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("unknown chip: %s\n", ci->pub.name);
+ break;
+ }
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u8 *type)
+{
+ u32 val;
+
+ /* read next descriptor */
+ val = ci->ops->read32(ci->ctx, *eromaddr);
+ *eromaddr += 4;
+
+ if (!type)
+ return val;
+
+ /* determine descriptor type */
+ *type = (val & DMP_DESC_TYPE_MSK);
+ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+ *type = DMP_DESC_ADDRESS;
+
+ return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u32 *regbase, u32 *wrapbase)
+{
+ u8 desc;
+ u32 val;
+ u8 mpnum = 0;
+ u8 stype, sztype, wraptype;
+
+ *regbase = 0;
+ *wrapbase = 0;
+
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ if (desc == DMP_DESC_MASTER_PORT) {
+ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+ wraptype = DMP_SLAVE_TYPE_MWRAP;
+ } else if (desc == DMP_DESC_ADDRESS) {
+ /* revert erom address */
+ *eromaddr -= 4;
+ wraptype = DMP_SLAVE_TYPE_SWRAP;
+ } else {
+ *eromaddr -= 4;
+ return -EILSEQ;
+ }
+
+ do {
+ /* locate address descriptor */
+ do {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ /* unexpected table end */
+ if (desc == DMP_DESC_EOT) {
+ *eromaddr -= 4;
+ return -EFAULT;
+ }
+ } while (desc != DMP_DESC_ADDRESS);
+
+ /* skip upper 32-bit address descriptor */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+ /* next size descriptor can be skipped */
+ if (sztype == DMP_SLAVE_SIZE_DESC) {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ /* skip upper size descriptor if present */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ }
+
+ /* only look for 4K register regions */
+ if (sztype != DMP_SLAVE_SIZE_4K)
+ continue;
+
+ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+ /* only regular slave and wrapper */
+ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+ *regbase = val & DMP_SLAVE_ADDR_BASE;
+ if (*wrapbase == 0 && stype == wraptype)
+ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
+ } while (*regbase == 0 || *wrapbase == 0);
+
+ return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 eromaddr;
+ u8 desc_type = 0;
+ u32 val;
+ u16 id;
+ u8 nmp, nsp, nmw, nsw, rev;
+ u32 base, wrap;
+ int err;
+
+ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
+ while (desc_type != DMP_DESC_EOT) {
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (!(val & DMP_DESC_VALID))
+ continue;
+
+ if (desc_type == DMP_DESC_EMPTY)
+ continue;
+
+ /* need a component descriptor */
+ if (desc_type != DMP_DESC_COMPONENT)
+ continue;
+
+ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+ /* next descriptor must be component as well */
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+ return -EFAULT;
+
+ /* only look at cores with master port(s) */
+ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+ /* need core with ports */
+ if (nmw + nsw == 0)
+ continue;
+
+ /* try to obtain register address info */
+ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+ if (err)
+ continue;
+
+ /* finally a core to be added */
+ core = brcmf_chip_add_core(ci, id, base, wrap);
+ if (IS_ERR(core))
+ return PTR_ERR(core);
+
+ core->rev = rev;
+ }
+
+ return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 regdata;
+ u32 socitype;
+
+ /* Get CC core rev
+ * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+ ci->pub.chip = regdata & CID_ID_MASK;
+ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+ ci->pub.chiprev);
+
+ if (socitype == SOCI_SB) {
+ if (ci->pub.chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_chip_sb_iscoreup;
+ ci->coredisable = brcmf_chip_sb_coredisable;
+ ci->resetcore = brcmf_chip_sb_resetcore;
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+ SI_ENUM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+ BCM4329_CORE_BUS_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+ BCM4329_CORE_SOCRAM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+ BCM4329_CORE_ARM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ } else if (socitype == SOCI_AI) {
+ ci->iscoreup = brcmf_chip_ai_iscoreup;
+ ci->coredisable = brcmf_chip_ai_coredisable;
+ ci->resetcore = brcmf_chip_ai_resetcore;
+
+ brcmf_chip_dmp_erom_scan(ci);
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
+ return -ENODEV;
+ }
+
+ brcmf_chip_get_raminfo(ci);
+
+ return brcmf_chip_cores_check(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+ struct brcmf_core *core;
+ struct brcmf_core_priv *cr4;
+ u32 val;
+
+
+ core = brcmf_chip_get_core(&chip->pub, id);
+ if (!core)
+ return;
+
+ switch (id) {
+ case BCMA_CORE_ARM_CM3:
+ brcmf_chip_coredisable(core, 0, 0);
+ break;
+ case BCMA_CORE_ARM_CR4:
+ cr4 = container_of(core, struct brcmf_core_priv, pub);
+
+ /* clear all IOCTL bits except HALT bit */
+ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+ ARMCR4_BCMA_IOCTL_CPUHALT);
+ break;
+ default:
+ brcmf_err("unknown id: %u\n", id);
+ break;
+ }
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_chip *pub;
+ struct brcmf_core_priv *cc;
+ u32 base;
+ u32 val;
+ int ret = 0;
+
+ pub = &chip->pub;
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ base = cc->pub.base;
+
+ /* get chipcommon capabilities */
+ pub->cc_caps = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, capabilities));
+
+ /* get pmu caps & rev */
+ if (pub->cc_caps & CC_CAP_PMU) {
+ val = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, pmucapabilities));
+ pub->pmurev = val & PCAP_REV_MASK;
+ pub->pmucaps = val;
+ }
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+ cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+ /* execute bus core specific setup */
+ if (chip->ops->setup)
+ ret = chip->ops->setup(chip->ctx, pub);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+ return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops)
+{
+ struct brcmf_chip_priv *chip;
+ int err = 0;
+
+ if (WARN_ON(!ops->read32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->write32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->prepare))
+ err = -EINVAL;
+ if (WARN_ON(!ops->exit_dl))
+ err = -EINVAL;
+ if (err < 0)
+ return ERR_PTR(-EINVAL);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&chip->cores);
+ chip->num_cores = 0;
+ chip->ops = ops;
+ chip->ctx = ctx;
+
+ err = ops->prepare(ctx);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_recognition(chip);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_setup(chip);
+ if (err < 0)
+ goto fail;
+
+ return &chip->pub;
+
+fail:
+ brcmf_chip_detach(&chip->pub);
+ return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+ struct brcmf_core_priv *tmp;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+ list_del(&core->list);
+ kfree(core);
+ }
+ kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list)
+ if (core->pub.id == coreid)
+ return &core->pub;
+
+ return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *cc;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+ return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+ u32 postreset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+}
+
+static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ if (!brcmf_chip_iscoreup(core)) {
+ brcmf_err("SOCRAM core is down after reset?\n");
+ return false;
+ }
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+
+ return true;
+}
+
+static inline void
+brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+ struct brcmf_core *core;
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+
+ /* restore ARM */
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+ return true;
+}
+
+void brcmf_chip_enter_download(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm) {
+ brcmf_chip_cr4_enterdl(chip);
+ return;
+ }
+
+ brcmf_chip_cm3_enterdl(chip);
+}
+
+bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm)
+ return brcmf_chip_cr4_exitdl(chip, rstvec);
+
+ return brcmf_chip_cm3_exitdl(chip);
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+ u32 base, addr, reg, pmu_cc3_mask = ~0;
+ struct brcmf_chip_priv *chip;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* old chips with PMU version less than 17 don't support save restore */
+ if (pub->pmurev < 17)
+ return false;
+
+ base = brcmf_chip_get_chipcommon(pub)->base;
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+ switch (pub->chip) {
+ case BCM4354_CHIP_ID:
+ /* explicitly check SR engine enable bit */
+ pmu_cc3_mask = BIT(2);
+ /* fall-through */
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ chip->ops->write32(chip->ctx, addr, 3);
+ addr = CORE_CC_REG(base, chipcontrol_data);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(base, pmucapabilities_ext);
+ reg = chip->ops->read32(chip->ctx, addr);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
+
+ addr = CORE_CC_REG(base, retention_ctl);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
+}
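The new chip.c reduces chip handling to brcmf_chip_attach() driven by a small set of bus-provided accessors. A minimal sketch of how a bus backend might wire it up; everything prefixed my_ is a hypothetical placeholder rather than code from this patch, and .setup is left out because only read32, write32, prepare and exit_dl are checked in brcmf_chip_attach():

static u32 my_read32(void *ctx, u32 addr)
{
	/* hypothetical 32-bit backplane read through the bus backend */
	return my_backplane_read(ctx, addr);
}

static void my_write32(void *ctx, u32 addr, u32 value)
{
	my_backplane_write(ctx, addr, value);	/* hypothetical */
}

static int my_prepare(void *ctx)
{
	return 0;	/* e.g. force the backplane clock up before register access */
}

static void my_exit_dl(void *ctx, struct brcmf_chip *chip, u32 rstvec)
{
	/* use the provided rstvec when non-zero, per the kernel-doc in chip.h */
}

static const struct brcmf_buscore_ops my_buscore_ops = {
	.read32  = my_read32,
	.write32 = my_write32,
	.prepare = my_prepare,
	.exit_dl = my_exit_dl,
};

static int my_probe_sketch(void *bus_ctx)
{
	struct brcmf_chip *ci;

	ci = brcmf_chip_attach(bus_ctx, &my_buscore_ops);
	if (IS_ERR(ci))
		return PTR_ERR(ci);

	brcmf_dbg(INFO, "chip %s rev %u, ramsize 0x%x\n",
		  ci->name, ci->chiprev, ci->ramsize);
	brcmf_chip_detach(ci);
	return 0;
}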
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 000000000000..c32908da90c8
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4 chips).
+ * @ramsize: amount of RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+ u32 chip;
+ u32 chiprev;
+ u32 cc_caps;
+ u32 pmucaps;
+ u32 pmurev;
+ u32 rambase;
+ u32 ramsize;
+ char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+ u16 id;
+ u16 rev;
+ u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @setup: bus-specific core setup.
+ * @exit_dl: exit download state.
+ * The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+ u32 (*read32)(void *ctx, u32 addr);
+ void (*write32)(void *ctx, u32 addr, u32 value);
+ int (*prepare)(void *ctx);
+ int (*setup)(void *ctx, struct brcmf_chip *chip);
+ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+ u32 postreset);
+void brcmf_chip_enter_download(struct brcmf_chip *ci);
+bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_CHIP_H */
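The enter/exit download pair declared here frames the firmware load performed by the bus code. A hedged sketch of that sequence, where struct firmware is the usual request_firmware() result and bus_write_ram() is a hypothetical stand-in for the bus-specific RAM write (brcmf_sdiod_ramrw() on SDIO):

static int download_fw_sketch(struct brcmf_chip *ci, const struct firmware *fw,
			      u32 rstvec)
{
	int err;

	/* halt the ARM and reset the relevant cores so RAM can be written */
	brcmf_chip_enter_download(ci);

	/* hypothetical bus-specific RAM write starting at the chip's RAM base */
	err = bus_write_ram(ci, ci->rambase, fw->data, fw->size);
	if (err)
		return err;

	/* hand the reset vector back to the chip layer and restart the ARM */
	if (!brcmf_chip_exit_download(ci, rstvec))
		return -EIO;

	return 0;
}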
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d4d966beb840..7d28cd385092 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1040,12 +1040,12 @@ void brcmf_detach(struct device *dev)
brcmf_cfg80211_detach(drvr->config);
+ brcmf_fws_deinit(drvr);
+
brcmf_bus_detach(drvr);
brcmf_proto_detach(drvr);
- brcmf_fws_deinit(drvr);
-
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index ddaa9efd053d..13c89a0c4ba7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
@@ -40,7 +41,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
-#include "sdio_chip.h"
+#include "chip.h"
#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -112,8 +113,6 @@ struct rte_console {
#define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */
-#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
-
#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading
@@ -156,6 +155,34 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
/* intstatus */
#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@@ -276,7 +303,6 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
* when idle
*/
@@ -433,10 +459,11 @@ struct brcmf_sdio {
bool alp_only; /* Don't use HT clock (ALP only) */
u8 *ctrl_frame_buf;
- u32 ctrl_frame_len;
+ u16 ctrl_frame_len;
bool ctrl_frame_stat;
- spinlock_t txqlock;
+ spinlock_t txq_lock; /* protect bus->txq */
+ struct semaphore tx_seq_lock; /* protect bus->tx_seq */
wait_queue_head_t ctrl_wait;
wait_queue_head_t dcmd_resp_wait;
@@ -483,16 +510,58 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4
-static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
-module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
-MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
-
enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL,
BRCMF_SDIO_FT_SUPER,
BRCMF_SDIO_FT_SUB,
};
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+ {16, 0x7},
+ {12, 0x5},
+ {8, 0x3},
+ {4, 0x1}
+};
+
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
@@ -511,6 +580,8 @@ enum brcmf_sdio_frmtype {
#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
+#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
+#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -530,6 +601,8 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
struct brcmf_firmware_names {
u32 chipid;
@@ -555,7 +628,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
- { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
+ { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+ { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
@@ -618,27 +692,24 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32 bit backplane bus.
*/
-static int
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
-static int
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- brcmf_sdiod_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
@@ -650,16 +721,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
int err = 0;
int try_cnt = 0;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter: on=%d\n", on);
wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
/* 1st KSO write goes to AOS wake up core if device is asleep */
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
- if (err) {
- brcmf_err("SDIO_AOS KSO write error: %d\n", err);
- return err;
- }
if (on) {
/* device WAKEUP through KSO:
@@ -689,18 +756,22 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
&err);
if (((rd_val & bmask) == cmp_val) && !err)
break;
- brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
- try_cnt, MAX_KSO_ATTEMPTS, err);
+
udelay(KSO_WAIT_US);
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
+ if (try_cnt > 2)
+ brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
+ rd_val, err);
+
+ if (try_cnt > MAX_KSO_ATTEMPTS)
+ brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
+
return err;
}
-#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
-
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
/* Turn backplane clock on or off */
@@ -799,7 +870,6 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
#endif /* defined (DEBUG) */
- bus->activity = true;
} else {
clkreq = 0;
@@ -899,8 +969,9 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(SDIO, "request %s currently %s\n",
+ u8 clkcsr;
+
+ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE"));
@@ -917,8 +988,20 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- data_ok(bus)))
- return -EBUSY;
+ data_ok(bus))) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
+ brcmf_dbg(SDIO, "no clock, set ALP\n");
+ brcmf_sdiod_regwb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_ALP_AVAIL_REQ, &err);
+ }
err = brcmf_sdio_kso_control(bus, false);
/* disable watchdog */
if (!err)
@@ -935,7 +1018,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
} else {
brcmf_err("error while changing bus sleep state %d\n",
err);
- return err;
+ goto done;
}
}
@@ -947,11 +1030,92 @@ end:
} else {
brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
}
-
+done:
+ brcmf_dbg(SDIO, "Exit: err=%d\n", err);
return err;
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ci->rambase + bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ sdio_release_host(bus->sdiodev->func[1]);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+ struct sdpcm_shared sh;
+
+ if (brcmf_sdio_readshared(bus, &sh) == 0)
+ bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
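The address check in brcmf_sdio_valid_shared_address() added above treats zero, or any word whose high halfword is the bitwise complement of its low halfword, as the leftover NVRAM length token that the firmware is expected to overwrite (see the comment in brcmf_sdio_readshared()). A quick worked example with made-up values:

	u32 addr = 0x0007fff8;	/* high half 0x0007 is the complement of low half 0xfff8 */
	bool rejected;

	/* ((~addr >> 16) & 0xffff) == 0xfff8 == (addr & 0xffff), so this value is rejected */
	rejected = (addr == 0) || (((~addr >> 16) & 0xffff) == (addr & 0xffff));

	/* a genuine sdpcm_shared pointer such as 0x0007fab0 matches neither pattern and is accepted */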
@@ -995,6 +1159,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
else
brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
bus->sdpcm_ver);
+
+ /*
+ * Retrieve console state address now that firmware should have
+ * updated it.
+ */
+ brcmf_sdio_get_console_addr(bus);
}
/*
@@ -1083,6 +1253,28 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->cur_read.len = 0;
}
+static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
+{
+ struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
+ u8 i, hi, lo;
+
+ /* On failure, abort the command and terminate the frame */
+ brcmf_err("sdio error, abort command and terminate frame\n");
+ bus->sdcnt.tx_sderrs++;
+
+ brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+ bus->sdcnt.f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+ bus->sdcnt.f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+}
+
/* return total length of buffer chain */
static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
@@ -1955,7 +2147,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
- *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+ *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
@@ -2003,7 +2195,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
* already properly aligned and does not
* need an sdpcm header.
*/
- if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+ if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
continue;
/* align packet data pointer */
@@ -2037,10 +2229,10 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
if (BRCMF_BYTES_ON() &&
((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
(BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
- brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+ brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
"Tx Frame:\n");
else if (BRCMF_HDRS_ON())
- brcmf_dbg_hex_dump(true, pkt_next,
+ brcmf_dbg_hex_dump(true, pkt_next->data,
head_pad + bus->tx_hdrlen,
"Tx Header:\n");
}
@@ -2067,11 +2259,11 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
u8 *hdr;
u32 dat_offset;
u16 tail_pad;
- u32 dummy_flags, chop_len;
+ u16 dummy_flags, chop_len;
struct sk_buff *pkt_next, *tmp, *pkt_prev;
skb_queue_walk_safe(pktq, pkt_next, tmp) {
- dummy_flags = *(u32 *)(pkt_next->cb);
+ dummy_flags = *(u16 *)(pkt_next->cb);
if (dummy_flags & ALIGN_SKB_FLAG) {
chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
if (chop_len) {
@@ -2100,7 +2292,6 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
int ret;
- int i;
struct sk_buff *pkt_next, *tmp;
brcmf_dbg(TRACE, "Enter\n");
@@ -2113,28 +2304,9 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
- if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- ret);
- bus->sdcnt.tx_sderrs++;
+ if (ret < 0)
+ brcmf_sdio_txfail(bus);
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->sdcnt.f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
- }
sdio_release_host(bus->sdiodev->func[1]);
done:
@@ -2164,13 +2336,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
pkt_num = 1;
- __skb_queue_head_init(&pktq);
+ if (down_interruptible(&bus->tx_seq_lock))
+ return cnt;
if (bus->txglom)
pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
- brcmf_sdio_txglomsz);
+ bus->sdiodev->txglomsz);
pkt_num = min_t(u32, pkt_num,
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
- spin_lock_bh(&bus->txqlock);
+ __skb_queue_head_init(&pktq);
+ spin_lock_bh(&bus->txq_lock);
for (i = 0; i < pkt_num; i++) {
pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
&prec_out);
@@ -2178,15 +2352,19 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
__skb_queue_tail(&pktq, pkt);
}
- spin_unlock_bh(&bus->txqlock);
- if (i == 0)
+ spin_unlock_bh(&bus->txq_lock);
+ if (i == 0) {
+ up(&bus->tx_seq_lock);
break;
+ }
ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+ up(&bus->tx_seq_lock);
+
cnt += i;
/* In poll mode, need to check for other events */
- if (!bus->intr && cnt) {
+ if (!bus->intr) {
/* Check device status, signal pending interrupt */
sdio_claim_host(bus->sdiodev->func[1]);
ret = r_sdreg32(bus, &intstatus,
@@ -2211,6 +2389,68 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
return cnt;
}
+static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
+{
+ u8 doff;
+ u16 pad;
+ uint retries = 0;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
+ int ret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Back the pointer to make room for bus header */
+ frame -= bus->tx_hdrlen;
+ len += bus->tx_hdrlen;
+
+ /* Add alignment padding (optional for ctl frames) */
+ doff = ((unsigned long)frame % bus->head_align);
+ if (doff) {
+ frame -= doff;
+ len += doff;
+ memset(frame + bus->tx_hdrlen, 0, doff);
+ }
+
+ /* Round send length to next SDIO block */
+ pad = 0;
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad > bus->roundup) || (pad >= bus->blocksize))
+ pad = 0;
+ } else if (len % bus->head_align) {
+ pad = bus->head_align - (len % bus->head_align);
+ }
+ len += pad;
+
+ hd_info.len = len - pad;
+ hd_info.channel = SDPCM_CONTROL_CHANNEL;
+ hd_info.dat_offset = doff + bus->tx_hdrlen;
+ hd_info.seq_num = bus->tx_seq;
+ hd_info.lastfrm = true;
+ hd_info.tail_pad = pad;
+ brcmf_sdio_hdpack(bus, frame, &hd_info);
+
+ if (bus->txglom)
+ brcmf_sdio_update_hwhdr(frame, len);
+
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+ frame, len, "Tx Frame:\n");
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+ BRCMF_HDRS_ON(),
+ frame, min_t(u16, len, 16), "TxHdr:\n");
+
+ do {
+ ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+
+ if (ret < 0)
+ brcmf_sdio_txfail(bus);
+ else
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+ } while (ret < 0 && retries++ < TXRETRIES);
+
+ return ret;
+}
+
static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
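The length rounding in brcmf_sdio_tx_ctrlframe() above is easier to follow with numbers plugged in. A worked example with illustrative values, not taken from any particular chip:

	u16 len = 96, pad = 0;
	const u16 blocksize = 64, roundup = 512, head_align = 4;

	if (roundup && blocksize && len > blocksize) {
		pad = blocksize - (len % blocksize);	/* 64 - 32 = 32 */
		if (pad > roundup || pad >= blocksize)	/* 32 <= 512 and 32 < 64, so keep it */
			pad = 0;
	} else if (len % head_align) {
		pad = head_align - (len % head_align);
	}
	len += pad;	/* 96 + 32 = 128: the frame now fills exactly two SDIO blocks */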
@@ -2292,21 +2532,29 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
}
}
+static void atomic_orr(int val, atomic_t *v)
+{
+ int old_val;
+
+ old_val = atomic_read(v);
+ while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
+ old_val = atomic_read(v);
+}
+
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- u8 idx;
+ struct brcmf_core *buscore;
u32 addr;
unsigned long val;
- int n, ret;
+ int ret;
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = bus->ci->c_inf[idx].base +
- offsetof(struct sdpcmd_regs, intstatus);
+ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
if (ret != 0)
- val = 0;
+ return ret;
val &= bus->hostintmask;
atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
@@ -2315,13 +2563,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
if (val) {
brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
bus->sdcnt.f1regdata++;
- }
-
- if (ret) {
- atomic_set(&bus->intstatus, 0);
- } else if (val) {
- for_each_set_bit(n, &val, 32)
- set_bit(n, (unsigned long *)&bus->intstatus.counter);
+ atomic_orr(val, &bus->intstatus);
}
return ret;
@@ -2331,10 +2573,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
u32 newstatus = 0;
unsigned long intstatus;
- uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
uint txlimit = bus->txbound; /* Tx frames to send before resched */
- uint framecnt = 0; /* Temporary counter of tx/rx frames */
- int err = 0, n;
+ uint framecnt; /* Temporary counter of tx/rx frames */
+ int err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -2431,70 +2672,38 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
intstatus &= ~I_HMB_FRAME_IND;
/* On frame indication, read available frames */
- if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
- framecnt = brcmf_sdio_readframes(bus, rxlimit);
+ if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
+ brcmf_sdio_readframes(bus, bus->rxbound);
if (!bus->rxpending)
intstatus &= ~I_HMB_FRAME_IND;
- rxlimit -= min(framecnt, rxlimit);
}
/* Keep still-pending events for next scheduling */
- if (intstatus) {
- for_each_set_bit(n, &intstatus, 32)
- set_bit(n, (unsigned long *)&bus->intstatus.counter);
- }
+ if (intstatus)
+ atomic_orr(intstatus, &bus->intstatus);
brcmf_sdio_clrintr(bus);
- if (data_ok(bus) && bus->ctrl_frame_stat &&
- (bus->clkstate == CLK_AVAIL)) {
- int i;
-
- sdio_claim_host(bus->sdiodev->func[1]);
- err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
- (u32)bus->ctrl_frame_len);
-
- if (err < 0) {
- /* On failure, abort the command and
- terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- err);
- bus->sdcnt.tx_sderrs++;
-
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, &err);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI,
- &err);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO,
- &err);
- bus->sdcnt.f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
+ if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
+ (down_interruptible(&bus->tx_seq_lock) == 0)) {
+ if (data_ok(bus)) {
+ sdio_claim_host(bus->sdiodev->func[1]);
+ err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
+ bus->ctrl_frame_len);
+ sdio_release_host(bus->sdiodev->func[1]);
- } else {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+ bus->ctrl_frame_stat = false;
+ brcmf_sdio_wait_event_wakeup(bus);
}
- sdio_release_host(bus->sdiodev->func[1]);
- bus->ctrl_frame_stat = false;
- brcmf_sdio_wait_event_wakeup(bus);
+ up(&bus->tx_seq_lock);
}
/* Send queued frames (limit 1 if rx may still be pending) */
- else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
- brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
- && data_ok(bus)) {
+ if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
+ data_ok(bus)) {
framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
txlimit;
- framecnt = brcmf_sdio_sendfromq(bus, framecnt);
- txlimit -= framecnt;
+ brcmf_sdio_sendfromq(bus, framecnt);
}
if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
@@ -2504,19 +2713,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
atomic_read(&bus->ipend) > 0 ||
(!atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- data_ok(bus)) || PKT_AVAILABLE()) {
+ data_ok(bus))) {
atomic_inc(&bus->dpc_tskcnt);
}
-
- /* If we're done for now, turn off clock request. */
- if ((bus->clkstate != CLK_PENDING)
- && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
- bus->activity = false;
- brcmf_dbg(SDIO, "idle state\n");
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, true, false);
- sdio_release_host(bus->sdiodev->func[1]);
- }
}
static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
@@ -2531,15 +2730,12 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
- uint datalen, prec;
+ uint prec;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- ulong flags;
- brcmf_dbg(TRACE, "Enter\n");
-
- datalen = pkt->len;
+ brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
/* Add space for the header */
skb_push(pkt, bus->tx_hdrlen);
@@ -2553,7 +2749,9 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
- spin_lock_irqsave(&bus->txqlock, flags);
+ spin_lock_bh(&bus->txq_lock);
+ /* reset bus_flags in packet cb */
+ *(u16 *)(pkt->cb) = 0;
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
@@ -2566,7 +2764,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
- spin_unlock_irqrestore(&bus->txqlock, flags);
+ spin_unlock_bh(&bus->txq_lock);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2661,110 +2859,27 @@ break2:
}
#endif /* DEBUG */
-static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
-{
- int i;
- int ret;
-
- bus->ctrl_frame_stat = false;
- ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
-
- if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- ret);
- bus->sdcnt.tx_sderrs++;
-
- brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->sdcnt.f1regdata++;
-
- for (i = 0; i < 3; i++) {
- u8 hi, lo;
- hi = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->sdcnt.f1regdata += 2;
- if (hi == 0 && lo == 0)
- break;
- }
- return ret;
- }
-
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
-
- return ret;
-}
-
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
- u8 *frame;
- u16 len, pad;
- uint retries = 0;
- u8 doff = 0;
- int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
- struct brcmf_sdio_hdrinfo hd_info = {0};
+ int ret = -1;
brcmf_dbg(TRACE, "Enter\n");
- /* Back the pointer to make a room for bus header */
- frame = msg - bus->tx_hdrlen;
- len = (msglen += bus->tx_hdrlen);
-
- /* Add alignment padding (optional for ctl frames) */
- doff = ((unsigned long)frame % bus->head_align);
- if (doff) {
- frame -= doff;
- len += doff;
- msglen += doff;
- memset(frame, 0, doff + bus->tx_hdrlen);
- }
- /* precondition: doff < bus->head_align */
- doff += bus->tx_hdrlen;
-
- /* Round send length to next SDIO block */
- pad = 0;
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- pad = bus->blocksize - (len % bus->blocksize);
- if ((pad > bus->roundup) || (pad >= bus->blocksize))
- pad = 0;
- } else if (len % bus->head_align) {
- pad = bus->head_align - (len % bus->head_align);
- }
- len += pad;
-
- /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
-
- /* Make sure backplane clock is on */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- sdio_release_host(bus->sdiodev->func[1]);
-
- hd_info.len = (u16)msglen;
- hd_info.channel = SDPCM_CONTROL_CHANNEL;
- hd_info.dat_offset = doff;
- hd_info.seq_num = bus->tx_seq;
- hd_info.lastfrm = true;
- hd_info.tail_pad = pad;
- brcmf_sdio_hdpack(bus, frame, &hd_info);
-
- if (bus->txglom)
- brcmf_sdio_update_hwhdr(frame, len);
+ if (down_interruptible(&bus->tx_seq_lock))
+ return -EINTR;
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
bus->tx_max, bus->tx_seq);
- bus->ctrl_frame_stat = true;
+ up(&bus->tx_seq_lock);
/* Send from dpc */
- bus->ctrl_frame_buf = frame;
- bus->ctrl_frame_len = len;
+ bus->ctrl_frame_buf = msg;
+ bus->ctrl_frame_len = msglen;
+ bus->ctrl_frame_stat = true;
wait_event_interruptible_timeout(bus->ctrl_wait,
!bus->ctrl_frame_stat,
@@ -2775,31 +2890,18 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
ret = 0;
} else {
brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
+ bus->ctrl_frame_stat = false;
+ if (down_interruptible(&bus->tx_seq_lock))
+ return -EINTR;
ret = -1;
}
}
-
if (ret == -1) {
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
- frame, len, "Tx Frame:\n");
- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
- BRCMF_HDRS_ON(),
- frame, min_t(u16, len, 16), "TxHdr:\n");
-
- do {
- sdio_claim_host(bus->sdiodev->func[1]);
- ret = brcmf_sdio_tx_frame(bus, frame, len);
- sdio_release_host(bus->sdiodev->func[1]);
- } while (ret < 0 && retries++ < TXRETRIES);
- }
-
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
- atomic_read(&bus->dpc_tskcnt) == 0) {
- bus->activity = false;
sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_dbg(INFO, "idle\n");
- brcmf_sdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
sdio_release_host(bus->sdiodev->func[1]);
+ up(&bus->tx_seq_lock);
}
if (ret)
@@ -2811,72 +2913,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
#ifdef DEBUG
-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
-{
- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
-}
-
-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
- struct sdpcm_shared *sh)
-{
- u32 addr;
- int rv;
- u32 shaddr = 0;
- struct sdpcm_shared_le sh_le;
- __le32 addr_le;
-
- shaddr = bus->ci->rambase + bus->ramsize - 4;
-
- /*
- * Read last word in socram to determine
- * address of sdpcm_shared structure
- */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
- sdio_release_host(bus->sdiodev->func[1]);
- if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr_le);
-
- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
-
- /*
- * Check if addr is valid.
- * NVRAM length at the end of memory should have been overwritten.
- */
- if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
- addr);
- return -EINVAL;
- }
-
- /* Read hndrte_shared structure */
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
- if (rv < 0)
- return rv;
-
- /* Endianness */
- sh->flags = le32_to_cpu(sh_le.flags);
- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
- sh->assert_line = le32_to_cpu(sh_le.assert_line);
- sh->console_addr = le32_to_cpu(sh_le.console_addr);
- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
-
- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
- SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK);
- return -EPROTO;
- }
-
- return 0;
-}
-
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
struct sdpcm_shared *sh, char __user *data,
size_t count)
@@ -3106,6 +3142,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
debugfs_create_file("forensics", S_IRUGO, dentry, bus,
&brcmf_sdio_forensic_ops);
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+ debugfs_create_u32("console_interval", 0644, dentry,
+ &bus->console_interval);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3224,32 +3262,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
const struct firmware *fw)
{
int err;
- int offset;
- int address;
- int len;
brcmf_dbg(TRACE, "Enter\n");
- err = 0;
- offset = 0;
- address = bus->ci->rambase;
- while (offset < fw->size) {
- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
- fw->size - offset;
- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
- if (err) {
- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- err, len, address);
- return err;
- }
- offset += len;
- address += len;
- }
- if (!err)
- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
- (u8 *)fw->data, fw->size))
- err = -EIO;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+ (u8 *)fw->data, fw->size);
+ if (err)
+ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+ err, (int)fw->size, bus->ci->rambase);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
@@ -3292,7 +3315,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+ brcmf_chip_enter_download(bus->ci);
fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL) {
@@ -3324,7 +3347,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
}
/* Take arm out of reset */
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
+ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@@ -3339,40 +3362,6 @@ err:
return bcmerror;
}
-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
-{
- u32 addr, reg, pmu_cc3_mask = ~0;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* old chips with PMU version less than 17 don't support save restore */
- if (bus->ci->pmurev < 17)
- return false;
-
- switch (bus->ci->chip) {
- case BCM43241_CHIP_ID:
- case BCM4335_CHIP_ID:
- case BCM4339_CHIP_ID:
- /* read PMU chipcontrol register 3 */
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & pmu_cc3_mask) != 0;
- default:
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
- return false;
-
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
- }
-}
-
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
@@ -3424,7 +3413,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (bus->ci->c_inf[1].rev < 12)
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
return 0;
val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3455,15 +3444,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
uint pad_size;
u32 value;
- u8 idx;
int err;
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- if (bus->ci->c_inf[idx].rev < 12) {
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3570,7 +3557,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
ret = -ENODEV;
}
- if (brcmf_sdio_sr_capable(bus)) {
+ if (brcmf_chip_sr_capable(bus->ci)) {
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
@@ -3714,11 +3701,175 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
datawork);
while (atomic_read(&bus->dpc_tskcnt)) {
+ atomic_set(&bus->dpc_tskcnt, 0);
brcmf_sdio_dpc(bus);
- atomic_dec(&bus->dpc_tskcnt);
}
}
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 drivestrength)
+{
+ const struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask;
+ u32 str_shift;
+ u32 base;
+ u32 i;
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ u32 addr;
+
+ if (!(ci->cc_caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+ /* note: 43143 does not support tristate */
+ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+ str_tab = sdiod_drvstr_tab2_3v3;
+ str_mask = 0x00000007;
+ str_shift = 0;
+ } else
+ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+ ci->name, drivestrength);
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ ci->name, ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+ base = brcmf_chip_get_chipcommon(ci)->base;
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+ str_tab[i].strength, drivestrength, cc_data_temp);
+ }
+}
+
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_err("error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
+
+static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ struct brcmf_core *core;
+ u32 reg_addr;
+
+ /* clear all interrupts */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+ if (rstvec)
+ /* Write reset vector to address 0 */
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
+}
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ u32 val, rev;
+
+ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (rev >= 2) {
+ val &= ~CID_ID_MASK;
+ val |= BCM4339_CHIP_ID;
+ }
+ }
+ return val;
+}
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+
+ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+ .prepare = brcmf_sdio_buscoreprep,
+ .exit_dl = brcmf_sdio_buscore_exitdl,
+ .read32 = brcmf_sdio_buscore_read32,
+ .write32 = brcmf_sdio_buscore_write32,
+};
+
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
@@ -3734,7 +3885,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
- * Force PLL off until brcmf_sdio_chip_attach()
+ * Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
@@ -3755,8 +3906,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
- brcmf_err("brcmf_sdio_chip_attach failed!\n");
+ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+ if (IS_ERR(bus->ci)) {
+ brcmf_err("brcmf_chip_attach failed!\n");
+ bus->ci = NULL;
goto fail;
}
@@ -3769,7 +3922,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = bus->sdiodev->pdata->drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
@@ -3792,24 +3945,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
+ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
pmucontrol);
- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
-
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3849,6 +3996,7 @@ brcmf_sdio_watchdog_thread(void *data)
brcmf_sdio_bus_watchdog(bus);
/* Count the tick for reference */
bus->sdcnt.tickcnt++;
+ reinit_completion(&bus->watchdog_wait);
} else
break;
}
@@ -3925,7 +4073,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
spin_lock_init(&bus->rxctl_lock);
- spin_lock_init(&bus->txqlock);
+ spin_lock_init(&bus->txq_lock);
+ sema_init(&bus->tx_seq_lock, 1);
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
@@ -4024,14 +4173,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
}
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
if (bus->ci) {
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
sdio_claim_host(bus->sdiodev->func[1]);
@@ -4042,12 +4191,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
* all necessary cores.
*/
msleep(20);
- brcmf_sdio_chip_enter_download(bus->sdiodev,
- bus->ci);
+ brcmf_chip_enter_download(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
- brcmf_sdio_chip_detach(&bus->ci);
+ brcmf_chip_detach(bus->ci);
}
kfree(bus->rxbuf);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 22adbe311d20..59a5af5bf994 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+ char *buf, u32 buflen)
{
u32 len;
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 77eae86e55c2..a30be683f4a1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index af17a5bc8b83..614e4888504f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -48,6 +48,11 @@
#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
+/* OBSS Coex Auto/On/Off */
+#define BRCMF_OBSS_COEX_AUTO (-1)
+#define BRCMF_OBSS_COEX_OFF 0
+#define BRCMF_OBSS_COEX_ON 1
+
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
BRCMF_FIL_P2P_IF_GO,
@@ -87,6 +92,11 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
+struct brcmf_fil_bwcap_le {
+ __le32 band;
+ __le32 bw_cap;
+};
+
/**
* struct tdls_iovar - common structure for tdls iovars.
*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index fc4f98b275d7..f3445ac627e4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -797,7 +797,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
/* SOCIAL CHANNELS 1, 6, 11 */
search_state = WL_P2P_DISC_ST_SEARCH;
brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
- } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+ } else if (dev != NULL &&
+ vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
/* If you are already a GO, then do SEARCH only */
brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
search_state = WL_P2P_DISC_ST_SEARCH;
@@ -2256,7 +2257,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_vif *vif;
enum brcmf_fil_p2p_if_types iftype;
- enum wl_mode mode;
int err;
if (brcmf_cfg80211_vif_event_armed(cfg))
@@ -2267,11 +2267,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
iftype = BRCMF_FIL_P2P_IF_CLIENT;
- mode = WL_MODE_BSS;
break;
case NL80211_IFTYPE_P2P_GO:
iftype = BRCMF_FIL_P2P_IF_GO;
- mode = WL_MODE_AP;
break;
case NL80211_IFTYPE_P2P_DEVICE:
return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644
index 82bf3c5d3cdc..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ***** SDIO interface chip backplane handle functions ***** */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/ssb/ssb_regs.h>
-#include <linux/bcma/bcma.h>
-
-#include <chipcommon.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <soc.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
-#include "sdio_chip.h"
-
-/* chip core base & ramsize */
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-
-/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE 0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE 0x18003000
-#define BCM43143_RAMSIZE 0x70000
-
-/* All D11 cores, ID 0x812 */
-#define BCM43xx_CORE_D11_BASE 0x18001000
-
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
- ((sbidh) & SSB_IDHIGH_RCLO))
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_SB 0
-#define SOCI_AI 1
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
-#define D11_BCMA_IOCTL_PHYRESET 0x0008
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
- {32, 0x6},
- {26, 0x7},
- {22, 0x4},
- {16, 0x5},
- {12, 0x2},
- {8, 0x3},
- {4, 0x0},
- {0, 0x1}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
- {6, 0x7},
- {5, 0x6},
- {4, 0x5},
- {3, 0x4},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
- {3, 0x3},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0} };
-
-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
- {16, 0x7},
- {12, 0x5},
- {8, 0x3},
- {4, 0x1}
-};
-
-u8
-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
- if (coreid == ci->c_inf[idx].id)
- return idx;
-
- return BRCMF_MAX_CORENUM;
-}
-
-static u32
-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
- return SBCOREREV(regdata);
-}
-
-static u32
-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-static bool
-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
- return (SSB_TMSLOW_CLOCK == regdata);
-}
-
-static bool
-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
- bool ret;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
-
- return ret;
-}
-
-static void
-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u32 regdata, base;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- base = ci->c_inf[idx].base;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if (regdata & SSB_TMSLOW_RESET)
- return;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_BUSY)
- brcmf_err("core state still busy\n");
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* if core is already in reset, just return */
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
- return;
-
- /* configure reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-
- /* put in reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- usleep_range(10, 20);
-
- /* wait till reset is 1 */
- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
- BCMA_RESET_CTL_RESET, 300);
-
- /* post reset configure */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-static void
-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* clear any serror */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
- BCMA_RESET_CTL_RESET) {
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
- usleep_range(40, 60);
- }
-
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
- BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-#ifdef DEBUG
-/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- u8 core_idx;
-
- /* check RAM core presence for ARM CM3 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != core_idx) {
- core_idx = brcmf_sdio_chip_getinfidx(ci,
- BCMA_CORE_INTERNAL_MEM);
- if (BRCMF_MAX_CORENUM == core_idx) {
- brcmf_err("RAM core not provided with ARM CM3 core\n");
- return -ENODEV;
- }
- }
-
- /* check RAM base for ARM CR4 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != core_idx) {
- if (ci->rambase == 0) {
- brcmf_err("RAM base not provided with ARM CR4 core\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- return 0;
-}
-#endif
-
-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 regdata;
- u32 socitype;
-
- /* Get CC core rev
- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(SI_ENUM_BASE, chipid),
- NULL);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
- ci->chiprev >= 2)
- ci->chip = BCM4339_CHIP_ID;
- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
-
- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
-
- if (socitype == SOCI_SB) {
- if (ci->chip != BCM4329_CHIP_ID) {
- brcmf_err("SB chip is not supported\n");
- return -ENODEV;
- }
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- } else if (socitype == SOCI_AI) {
- ci->iscoreup = brcmf_sdio_ai_iscoreup;
- ci->corerev = brcmf_sdio_ai_corerev;
- ci->coredisable = brcmf_sdio_ai_coredisable;
- ci->resetcore = brcmf_sdio_ai_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x90000;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM43362_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0a004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x08080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x3C000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("AXI chip is not supported\n");
- return -ENODEV;
- }
- } else {
- brcmf_err("chip backplane type %u is not supported\n",
- socitype);
- return -ENODEV;
- }
-
- return brcmf_sdio_chip_cichk(ci);
-}
-
-static int
-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
-{
- int err = 0;
- u8 clkval, clkset;
-
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_err("error writing for HT off\n");
- return err;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-
- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- return -EACCES;
- }
-
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- return -EBUSY;
- }
-
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- udelay(65);
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- return 0;
-}
-
-static void
-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 base = ci->c_inf[0].base;
-
- /* get chipcommon rev */
- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
-
- /* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
-
- /* get pmu caps & rev */
- if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps =
- brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
- }
-
- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->c_inf[0].rev, ci->pmurev,
- ci->c_inf[1].rev, ci->c_inf[1].id);
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-}
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr)
-{
- int ret;
- struct brcmf_chip *ci;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
- if (!ci)
- return -ENOMEM;
-
- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
- if (ret != 0)
- goto err;
-
- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
- if (ret != 0)
- goto err;
-
- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
-
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
-
- *ci_ptr = ci;
- return 0;
-
-err:
- kfree(ci);
- return ret;
-}
-
-void
-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(*ci_ptr);
- *ci_ptr = NULL;
-}
-
-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-void
-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 drivestrength)
-{
- const struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask;
- u32 str_shift;
- char chn[8];
- u32 base = ci->c_inf[0].base;
- u32 i;
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- u32 addr;
-
- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
- str_tab = sdiod_drvstr_tab1_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
- str_tab = sdiod_drvstr_tab6_1v8;
- str_mask = 0x00001800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
- /* note: 43143 does not support tristate */
- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
- str_tab = sdiod_drvstr_tab2_3v3;
- str_mask = 0x00000007;
- str_shift = 0;
- } else
- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- drivestrength);
- break;
- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
- str_tab = sdiod_drive_strength_tab5_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- ci->chiprev, ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
- addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
-
- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
- str_tab[i].strength, drivestrength, cc_data_temp);
- }
-}
-
-static void
-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
-}
-
-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 core_idx;
- u32 reg_addr;
-
- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
- brcmf_err("SOCRAM core is down after reset?\n");
- return false;
- }
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
-
- return true;
-}
-
-static inline void
-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-}
-
-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 core_idx;
- u32 reg_addr;
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- /* Write reset vector to address 0 */
- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
- sizeof(rstvec));
-
- /* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
- 0, 0);
-
- return true;
-}
-
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx) {
- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
- return;
- }
-
- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
-}
-
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
-
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644
index fb0614329ede..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMFMAC_SDIO_CHIP_H_
-#define _BRCMFMAC_SDIO_CHIP_H_
-
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* SDIO function 1 register CHIPCLKCSR */
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-#define BRCMF_MAX_CORENUM 6
-
-struct brcmf_core {
- u16 id;
- u16 rev;
- u32 base;
- u32 wrapbase;
- u32 caps;
- u32 cib;
-};
-
-struct brcmf_chip {
- u32 chip;
- u32 chiprev;
- /* core info */
- /* always put chipcommon core at 0, bus core at 1 */
- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
- u32 pmurev;
- u32 pmucaps;
- u32 ramsize;
- u32 rambase;
- u32 rst_vec; /* reset vertor for ARM CR4 core */
-
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits);
- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits);
-};
-
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-};
-
-/* sdio core registers */
-struct sdpcmd_regs {
- u32 corecontrol; /* 0x00, rev8 */
- u32 corestatus; /* rev8 */
- u32 PAD[1];
- u32 biststatus; /* rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* 0x020, rev8 */
- u32 hostintmask; /* rev8 */
- u32 intmask; /* rev8 */
- u32 sbintstatus; /* rev8 */
- u32 sbintmask; /* rev8 */
- u32 funcintmask; /* rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* 0x040, rev8 */
- u32 tohostmailbox; /* rev8 */
- u32 tosbmailboxdata; /* rev8 */
- u32 tohostmailboxdata; /* rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* 0x110, rev8 */
- u32 cmd52wr; /* rev8 */
- u32 cmd53rd; /* rev8 */
- u32 cmd53wr; /* rev8 */
- u32 abort; /* rev8 */
- u32 datacrcerror; /* rev8 */
- u32 rdoutofsync; /* rev8 */
- u32 wroutofsync; /* rev8 */
- u32 writebusy; /* rev8 */
- u32 readwait; /* rev8 */
- u32 readterm; /* rev8 */
- u32 writeterm; /* rev8 */
- u32 PAD[40];
- u32 clockctlstatus; /* rev8 */
- u32 PAD[7];
-
- u32 PAD[128]; /* DMA engines */
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* 0x76E, rev6 */
- u16 backplaneaddr0; /* rev6 */
- u16 backplaneaddr1; /* rev6 */
- u16 backplaneaddr2; /* rev6 */
- u16 backplaneaddr3; /* rev6 */
- u16 backplanedata0; /* rev6 */
- u16 backplanedata1; /* rev6 */
- u16 backplanedata2; /* rev6 */
- u16 backplanedata3; /* rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* 0x7BE, rev2 */
- u32 PAD[464];
-
- u16 PAD[0x80];
-};
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci,
- u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci);
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec);
-
-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 092e9c824992..3deab7959a0d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -180,6 +180,97 @@ struct brcmf_sdio_dev {
uint max_request_size;
ushort max_segment_count;
uint max_segment_size;
+ uint txglomsz;
+ struct sg_table sgtable;
+};
+
+/* sdio core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
};
/* Register/deregister interrupt handler. */
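The sdpcmd_regs layout above encodes fixed hardware offsets purely through its PAD members, so the offsets noted in the comments (0x010, 0x020, 0x040, ...) are easy to verify at compile time. A minimal sketch of such a check follows — it is illustrative only, not part of the patch, and assumes the struct definition from sdio_host.h is in scope.

#include <linux/bug.h>
#include <linux/stddef.h>

/* Compile-time sanity checks for the register map above; the expected
 * offsets are taken straight from the comments in struct sdpcmd_regs.
 */
static inline void sdpcmd_regs_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, pcmciamesportaladdr) != 0x010);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, intstatus)           != 0x020);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, tosbmailbox)         != 0x040);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, sdioaccess)          != 0x050);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, pcmciaframectrl)     != 0x060);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, intrcvlazy)          != 0x100);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, cmd52rd)             != 0x110);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, cis)                 != 0x400);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, backplanecsr)        != 0x76e);
	BUILD_BUG_ON(offsetof(struct sdpcmd_regs, spromstatus)         != 0x7be);
}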
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d7718a5fa2f0..afb3d15e38ff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/netlink.h>
@@ -190,6 +191,7 @@ static struct ieee80211_supported_band __wl_band_2ghz = {
.n_channels = ARRAY_SIZE(__wl_2ghz_channels),
.bitrates = wl_g_rates,
.n_bitrates = wl_g_rates_size,
+ .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
};
static struct ieee80211_supported_band __wl_band_5ghz_a = {
@@ -251,6 +253,10 @@ struct parsed_vndr_ies {
struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
+static int brcmf_roamoff;
+module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
+MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
+
/* Quarter dBm units to mW
* Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
* Table is offset so the last entry is largest mW value that fits in
@@ -351,13 +357,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *)buf;
- totlen = buflen;
+ const struct brcmf_tlv *elt = buf;
+ int totlen = buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
@@ -378,8 +382,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
* not update the tlvs buffer pointer/length.
*/
static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +405,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
}
static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+brcmf_find_wpaie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
}
@@ -414,9 +418,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
}
static struct brcmf_vs_tlv *
-brcmf_find_wpsie(u8 *parse, u32 len)
+brcmf_find_wpsie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -491,6 +495,19 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
return err;
}
+static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
+{
+ enum nl80211_iftype iftype;
+
+ iftype = vif->wdev.iftype;
+ return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
+}
+
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+{
+ return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
+}
+
static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
const char *name,
enum nl80211_iftype type,
@@ -651,7 +668,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
- vif->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
@@ -667,12 +683,10 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
*/
return 0;
}
- vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- vif->mode = WL_MODE_AP;
ap = 1;
break;
default:
@@ -696,7 +710,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
- brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
+ brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
"Adhoc" : "Infra");
}
ndev->ieee80211_ptr->iftype = type;
@@ -1340,13 +1354,14 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
}
static s32
-brcmf_set_set_cipher(struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+brcmf_set_wsec_mode(struct net_device *ndev,
+ struct cfg80211_connect_params *sme, bool mfp)
{
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
+ s32 wsec;
s32 err = 0;
if (sme->crypto.n_ciphers_pairwise) {
@@ -1398,7 +1413,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
sme->privacy)
pval = AES_ENABLED;
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
+
+ if (mfp)
+ wsec = pval | gval | MFP_CAPABLE;
+ else
+ wsec = pval | gval;
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
if (err) {
brcmf_err("error (%d)\n", err);
return err;
@@ -1562,13 +1582,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
- void *ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
+ const void *ie;
u32 ie_len;
struct brcmf_ext_join_params_le *ext_join_params;
u16 chanspec;
-
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1591,7 +1610,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ie_len = wpa_ie->len + TLV_HDR_LEN;
} else {
/* find the RSN_IE */
- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+ sme->ie_len,
WLAN_EID_RSN);
if (rsn_ie) {
ie = rsn_ie;
@@ -1636,7 +1656,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = brcmf_set_set_cipher(ndev, sme);
+ err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
if (err) {
brcmf_err("wl_set_set_cipher failed (%d)\n", err);
goto done;
@@ -1678,22 +1698,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
profile->ssid.SSID_len);
- /*increase dwell time to receive probe response or detect Beacon
- * from target AP at a noisy air only during connect command
- */
- ext_join_params->scan_le.active_time =
- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
- ext_join_params->scan_le.passive_time =
- cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+
/* Set up join scan parameters */
ext_join_params->scan_le.scan_type = -1;
- /* to sync with presence period of VSDB GO.
- * Send probe request more frequently. Probe request will be stopped
- * when it gets probe response from target AP/GO.
- */
- ext_join_params->scan_le.nprobes =
- cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
- BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
ext_join_params->scan_le.home_time = cpu_to_le32(-1);
if (sme->bssid)
@@ -1706,6 +1713,25 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->assoc_le.chanspec_list[0] =
cpu_to_le16(chanspec);
+ /* Increase dwell time to receive probe response or detect
+ * beacon from target AP at a noisy air only during connect
+ * command.
+ */
+ ext_join_params->scan_le.active_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+ ext_join_params->scan_le.passive_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+ /* To sync with presence period of VSDB GO send probe request
+ * more frequently. Probe request will be stopped when it gets
+ * probe response from target AP/GO.
+ */
+ ext_join_params->scan_le.nprobes =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ } else {
+ ext_join_params->scan_le.active_time = cpu_to_le32(-1);
+ ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
+ ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
}
err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
@@ -1913,7 +1939,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
- if ((ifp->vif->mode != WL_MODE_AP) &&
+ if (!brcmf_is_apmode(ifp->vif) &&
(params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
@@ -1981,7 +2007,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- if (mac_addr) {
+ if (mac_addr &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
@@ -2010,7 +2038,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
- if (ifp->vif->mode != WL_MODE_AP) {
+ if (!brcmf_is_apmode(ifp->vif)) {
brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
@@ -2164,12 +2192,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
u8 *bssid = profile->bssid;
struct brcmf_sta_info_le sta_info_le;
+ u32 beacon_period;
+ u32 dtim_period;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
return -EIO;
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (brcmf_is_apmode(ifp->vif)) {
memcpy(&sta_info_le, mac, ETH_ALEN);
err = brcmf_fil_iovar_data_get(ifp, "sta_info",
&sta_info_le,
@@ -2186,7 +2216,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
sinfo->inactive_time, sinfo->connected_time);
- } else if (ifp->vif->mode == WL_MODE_BSS) {
+ } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
if (memcmp(mac, bssid, ETH_ALEN)) {
brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
mac, bssid);
@@ -2218,6 +2248,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
+ &beacon_period);
+ if (err) {
+ brcmf_err("Could not get beacon period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.beacon_interval =
+ beacon_period;
+ brcmf_dbg(CONN, "Beacon peroid %d\n",
+ beacon_period);
+ }
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
+ &dtim_period);
+ if (err) {
+ brcmf_err("Could not get DTIM period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.dtim_period = dtim_period;
+ brcmf_dbg(CONN, "DTIM peroid %d\n",
+ dtim_period);
+ }
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
}
} else
err = -EPERM;
@@ -2444,18 +2498,13 @@ CleanUp:
return err;
}
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
-{
- return vif->mode == WL_MODE_IBSS;
-}
-
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
- struct brcmf_tlv *tim;
+ const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@@ -3220,8 +3269,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
- bool is_rsn_ie)
+brcmf_configure_wpaie(struct net_device *ndev,
+ const struct brcmf_vs_tlv *wpa_ie,
+ bool is_rsn_ie)
{
struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3757,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_tlv *ssid_ie;
+ const struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@@ -4220,32 +4270,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
-static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
-{
- switch (type) {
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- return -ENOTSUPP;
- case NL80211_IFTYPE_ADHOC:
- return WL_MODE_IBSS;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_P2P_CLIENT:
- return WL_MODE_BSS;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_P2P_GO:
- return WL_MODE_AP;
- case NL80211_IFTYPE_P2P_DEVICE:
- return WL_MODE_P2P;
- case NL80211_IFTYPE_UNSPECIFIED:
- default:
- break;
- }
-
- return -EINVAL;
-}
-
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
/* scheduled scan settings */
@@ -4370,7 +4394,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
- vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@@ -4416,7 +4439,9 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
u32 event = e->event_code;
u16 flags = e->flags;
- if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
+ if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
+ (event == BRCMF_E_DISASSOC_IND) ||
+ ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
brcmf_dbg(CONN, "Processing link down\n");
return true;
}
@@ -4658,16 +4683,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct net_device *ndev = ifp->ndev;
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct ieee80211_channel *chan;
s32 err = 0;
+ u16 reason;
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
if (brcmf_is_ibssmode(ifp->vif)) {
+ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -4679,9 +4707,15 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state))
- cfg80211_disconnected(ndev, 0, NULL, 0,
+ &ifp->vif->sme_state)) {
+ reason = 0;
+ if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
+ (e->event_code == BRCMF_E_DISASSOC_IND)) &&
+ (e->reason != WLAN_REASON_UNSPECIFIED))
+ reason = e->reason;
+ cfg80211_disconnected(ndev, reason, NULL, 0,
GFP_KERNEL);
+ }
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
@@ -4875,11 +4909,8 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
cfg->scan_request = NULL;
cfg->pwr_save = true;
- cfg->roam_on = true; /* roam on & off switch.
- we enable roam per default */
- cfg->active_scan = true; /* we do active scan for
- specific scan per default */
- cfg->dongle_up = false; /* dongle is not up yet */
+ cfg->active_scan = true; /* we do active scan per default */
+ cfg->dongle_up = false; /* dongle is not up yet */
err = brcmf_init_priv_mem(cfg);
if (err)
return err;
@@ -4904,6 +4935,30 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
mutex_init(&event->vif_event_lock);
}
+static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
+{
+ struct brcmf_fil_bwcap_le band_bwcap;
+ u32 val;
+ int err;
+
+ /* verify support for bw_cap command */
+ val = WLC_BAND_5G;
+ err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+
+ if (!err) {
+ /* only set 2G bandwidth using bw_cap command */
+ band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
+ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+ err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
+ sizeof(band_bwcap));
+ } else {
+ brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
+ val = WLC_N_BW_40ALL;
+ err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
+ }
+ return err;
+}
+
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@@ -4961,6 +5016,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
+ /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
+ * setup 40MHz in 2GHz band and enable OBSS scanning.
+ */
+ if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ err = brcmf_enable_bw40_2g(ifp);
+ if (!err)
+ err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
+ BRCMF_OBSS_COEX_AUTO);
+ }
+
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
@@ -4999,7 +5065,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
}
static s32
-brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
{
s32 err = 0;
__le32 roamtrigger[2];
@@ -5009,7 +5075,7 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
* Setup timeout if Beacons are lost and roam is
* off to report link down
*/
- if (roamvar) {
+ if (brcmf_roamoff) {
err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
if (err) {
brcmf_err("bcn_timeout error (%d)\n", err);
@@ -5021,8 +5087,9 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
* Enable/Disable built-in roaming to allow supplicant
* to take care of roaming
*/
- brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
- err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
+ brcmf_dbg(INFO, "Internal Roaming = %s\n",
+ brcmf_roamoff ? "Off" : "On");
+ err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
if (err) {
brcmf_err("roam_off error (%d)\n", err);
goto dongle_rom_out;
@@ -5164,9 +5231,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
- ch.chnum, band_chan_arr[index].center_freq,
- ch.bw, ch.sb);
if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
@@ -5267,6 +5331,8 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 band_list[3];
u32 nmode;
u32 bw_cap[2] = { 0, 0 };
+ u32 rxchain;
+ u32 nchain;
s8 phy;
s32 err;
u32 nband;
@@ -5303,6 +5369,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
+ err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
+ if (err) {
+ brcmf_err("rxchain error (%d)\n", err);
+ nchain = 1;
+ } else {
+ for (nchain = 0; rxchain; nchain++)
+ rxchain = rxchain & (rxchain - 1);
+ }
+ brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
err = brcmf_construct_reginfo(cfg, bw_cap);
if (err) {
brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
@@ -5331,10 +5407,7 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
band->ht_cap.ht_supported = true;
band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial
- * stream
- */
- band->ht_cap.mcs.rx_mask[0] = 0xff;
+ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
bands[band->band] = band;
}
@@ -5381,7 +5454,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "power save set to %s\n",
(power_mode ? "enabled" : "disabled"));
- err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
+ err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
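The rxchain handling added to brcmf_update_wiphybands() counts active receive chains by repeatedly clearing the lowest set bit of the chain bitmap. The standalone sketch below (illustrative only; the main() and helper name are hypothetical) shows the same bit trick outside the driver.

#include <stdio.h>

/* Count set bits the way the new rxchain loop does: rxchain &= rxchain - 1
 * clears the lowest set bit, so the loop runs once per active chain.
 */
static unsigned int count_chains(unsigned int rxchain)
{
	unsigned int nchain;

	for (nchain = 0; rxchain; nchain++)
		rxchain &= rxchain - 1;
	return nchain;
}

int main(void)
{
	printf("%u\n", count_chains(0x3));	/* chains 0 and 1 -> 2 */
	printf("%u\n", count_chains(0x5));	/* chains 0 and 2 -> 2 */
	return 0;
}

In kernel code the same count is available as hweight32(rxchain); the open-coded loop mirrors what the hunk above adds.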
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 2dc6a074e8ed..283c525a44f7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -89,21 +89,6 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_SUPPRESS,
};
-/**
- * enum wl_mode - driver mode of virtual interface.
- *
- * @WL_MODE_BSS: connects to BSS.
- * @WL_MODE_IBSS: operate as ad-hoc.
- * @WL_MODE_AP: operate as access-point.
- * @WL_MODE_P2P: provide P2P discovery.
- */
-enum wl_mode {
- WL_MODE_BSS,
- WL_MODE_IBSS,
- WL_MODE_AP,
- WL_MODE_P2P
-};
-
/* dongle configuration */
struct brcmf_cfg80211_conf {
u32 frag_threshold;
@@ -193,7 +178,6 @@ struct vif_saved_ie {
* @ifp: lower layer interface pointer
* @wdev: wireless device.
* @profile: profile information.
- * @mode: operating mode.
* @roam_off: roaming state.
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
@@ -204,7 +188,6 @@ struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
struct wireless_dev wdev;
struct brcmf_cfg80211_profile profile;
- s32 mode;
s32 roam_off;
unsigned long sme_state;
bool pm_block;
@@ -402,7 +385,6 @@ struct brcmf_cfg80211_info {
bool ibss_starter;
bool pwr_save;
bool dongle_up;
- bool roam_on;
bool scan_tried;
u8 *dcmd_buf;
u8 *extra_buf;
@@ -491,7 +473,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 925034b80e9c..8c5fa4e58139 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
bool blocked;
int err;
+ if (!wl->ucode.bcm43xx_bomminor) {
+ err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+ if (err)
+ return -ENOENT;
+ }
+
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
blocked = brcms_rfkill_set_hw_state(wl);
@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- if (!wl->ucode.bcm43xx_bomminor) {
- err = brcms_request_fw(wl, wl->wlc->hw->d11core);
- if (err) {
- brcms_remove(wl->wlc->hw->d11core);
- return -ENOENT;
- }
- }
-
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
@@ -1094,12 +1092,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* Attach to the WL device identified by vendor and device parameters.
* regs is a host accessible memory address pointing to WL device registers.
*
- * brcms_attach is not defined as static because in the case where no bus
- * is defined, wl_attach will never be called, and thus, gcc will issue
- * a warning that this function is defined but not used if we declare
- * it as static.
- *
- *
* is called in brcms_bcma_probe() context, therefore no locking required.
*/
static struct brcms_info *brcms_attach(struct bcma_device *pdev)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 6fa5d4863782..d816270db3be 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -43,5 +43,6 @@
#define BCM4335_CHIP_ID 0x4335
#define BCM43362_CHIP_ID 43362
#define BCM4339_CHIP_ID 0x4339
+#define BCM4354_CHIP_ID 0x4354
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 7ca2aa1035b2..74419d4bd123 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -217,6 +217,9 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WSEC_SWFLAG 0x0008
/* to go into transition mode without setting wep */
#define SES_OW_ENABLED 0x0040
+/* MFP */
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
/* WPA authentication mode bitvec */
#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
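The MFP bits defined above are OR'ed together with the pairwise and group cipher bits into the single wsec value that brcmf_set_wsec_mode() hands to the firmware. A minimal sketch of that composition follows; the MFP values are the ones added in this patch, while the TKIP/AES bit values are repeated here only to keep the example self-contained (the authoritative definitions live in brcmu_wifi.h).

#include <stdio.h>

#define TKIP_ENABLED	0x0002
#define AES_ENABLED	0x0004
#define MFP_CAPABLE	0x0200	/* added by this patch */
#define MFP_REQUIRED	0x0400	/* added by this patch */

/* Mirror of brcmf_set_wsec_mode(): OR the pairwise (pval) and group (gval)
 * cipher bits, and add MFP_CAPABLE when management frame protection was
 * requested in the connect parameters.
 */
static int build_wsec(int pval, int gval, int mfp)
{
	return mfp ? (pval | gval | MFP_CAPABLE) : (pval | gval);
}

int main(void)
{
	printf("wsec=0x%04x\n", build_wsec(AES_ENABLED, AES_ENABLED, 1)); /* 0x0204 */
	return 0;
}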
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 5a9ffd3a6a6c..e23d67e0bfe6 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -202,8 +202,8 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
}
/* calculate the block size */
- tx_size = block_size = min((size_t)(firmware->size - put),
- (size_t)DOWNLOAD_BLOCK_SIZE);
+ tx_size = block_size = min_t(size_t, firmware->size - put,
+ DOWNLOAD_BLOCK_SIZE);
memcpy(buf, &firmware->data[put], block_size);
if (block_size < DOWNLOAD_BLOCK_SIZE) {
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 9f825f2620da..b6ec51923b20 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -677,6 +677,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12(
"ZoomAir 11Mbps High", "Rate wireless Networking",
0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card",
+ 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID123(
"Pretec", "CompactWLAN Card 802.11b", "2.5",
0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3aba49259ef1..dfc6dfc56d52 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -7065,7 +7065,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 139326065bd9..c5aa404069f3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9169,7 +9169,7 @@ static int ipw_wx_set_nick(struct net_device *dev,
if (wrqu->data.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
mutex_lock(&priv->mutex);
- wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+ wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
IPW_DEBUG_TRACE("<<\n");
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 0487461ae4da..dc1d20cf64ee 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -1248,14 +1248,7 @@ il3945_rx_handle(struct il_priv *il)
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+ reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
* handle those that need handling via function in
@@ -1495,12 +1488,14 @@ il3945_irq_tasklet(struct il_priv *il)
if (inta & CSR_INT_BIT_WAKEUP) {
D_ISR("Wakeup interrupt\n");
il_rx_queue_update_write_ptr(il, &il->rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
il_txq_update_write_ptr(il, &il->txq[0]);
il_txq_update_write_ptr(il, &il->txq[1]);
il_txq_update_write_ptr(il, &il->txq[2]);
il_txq_update_write_ptr(il, &il->txq[3]);
il_txq_update_write_ptr(il, &il->txq[4]);
- il_txq_update_write_ptr(il, &il->txq[5]);
+ spin_unlock_irqrestore(&il->lock, flags);
il->isr_stats.wakeup++;
handled |= CSR_INT_BIT_WAKEUP;
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 9a45f6f626f6..76b0729ade17 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 43f488a8cda2..888ad5c74639 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -92,7 +92,6 @@ il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
* EEPROM
*/
struct il_mod_params il4965_mod_params = {
- .amsdu_size_8K = 1,
.restart_fw = 1,
/* the rest are 0 by default */
};
@@ -4274,17 +4273,7 @@ il4965_rx_handle(struct il_priv *il)
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
- (pkt->hdr.cmd != N_RX_MPDU) &&
- (pkt->hdr.cmd != N_COMPRESSED_BA) &&
- (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+ reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
* handle those that need handling via function in
@@ -6876,6 +6865,6 @@ module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 4d5e33259ca8..eaaeea19d8c5 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_4965_ops = {
.name = IL4965_RS_NAME,
.tx_status = il4965_rs_tx_status,
.get_rate = il4965_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 048421511988..dd744135c956 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -2270,7 +2270,8 @@ struct il_spectrum_notification {
*/
#define IL_POWER_VEC_SIZE 5
-#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
struct il3945_powertable_cmd {
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 02e8233ccf29..4f42174d9994 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1078,29 +1078,82 @@ EXPORT_SYMBOL(il_get_channel_info);
* Setting power level allows the card to go to sleep when not busy.
*
* We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
+ * we get from mac80211.
*/
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct il_power_vec_entry {
- struct il_powertable_cmd cmd;
- u8 no_dtim; /* number of skip dtim */
-};
+#define SLP_VEC(X0, X1, X2, X3, X4) { \
+ cpu_to_le32(X0), \
+ cpu_to_le32(X1), \
+ cpu_to_le32(X2), \
+ cpu_to_le32(X3), \
+ cpu_to_le32(X4) \
+}
static void
-il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
+ const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+ SLP_VEC(2, 2, 4, 6, 0xFF),
+ SLP_VEC(2, 4, 7, 10, 10),
+ SLP_VEC(4, 7, 10, 10, 0xFF)
+ };
+ int i, dtim_period, no_dtim;
+ u32 max_sleep;
+ bool skip;
+
memset(cmd, 0, sizeof(*cmd));
if (il->power_data.pci_pm)
cmd->flags |= IL_POWER_PCI_PM_MSK;
- D_POWER("Sleep command for CAM\n");
+ /* if no Power Save, we are done */
+ if (il->power_data.ps_disabled)
+ return;
+
+ cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+ cmd->keep_alive_seconds = 0;
+ cmd->debug_flags = 0;
+ cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
+ cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
+ cmd->keep_alive_beacons = 0;
+
+ dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
+
+ if (dtim_period <= 2) {
+ memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
+ no_dtim = 2;
+ } else if (dtim_period <= 10) {
+ memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
+ no_dtim = 2;
+ } else {
+ memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
+ no_dtim = 0;
+ }
+
+ if (dtim_period == 0) {
+ dtim_period = 1;
+ skip = false;
+ } else {
+ skip = !!no_dtim;
+ }
+
+ if (skip) {
+ __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
+
+ max_sleep = le32_to_cpu(tmp);
+ if (max_sleep == 0xFF)
+ max_sleep = dtim_period * (skip + 1);
+ else if (max_sleep > dtim_period)
+ max_sleep = (max_sleep / dtim_period) * dtim_period;
+ cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
+ } else {
+ max_sleep = dtim_period;
+ cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
+ }
+
+ for (i = 0; i < IL_POWER_VEC_SIZE; i++)
+ if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
+ cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
static int
@@ -1173,7 +1226,8 @@ il_power_update_mode(struct il_priv *il, bool force)
{
struct il_powertable_cmd cmd;
- il_power_sleep_cam_cmd(il, &cmd);
+ il_build_powertable_cmd(il, &cmd);
+
return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);
@@ -5081,6 +5135,7 @@ set_ch_out:
}
if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
ret = il_power_update_mode(il, false);
if (ret)
D_MAC80211("Error setting sleep level\n");
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index ad123d66ab6c..dfb13c70efe8 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1123,6 +1123,7 @@ struct il_power_mgr {
struct il_powertable_cmd sleep_cmd_next;
int debug_sleep_level_override;
bool pci_pm;
+ bool ps_disabled;
};
struct il_priv {
@@ -1597,7 +1598,7 @@ struct il_mod_params {
int disable_hw_scan; /* def: 0 = use h/w scan */
int num_of_queues; /* def: HW dependent */
int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int amsdu_size_8K; /* def: 0 = disable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
};
@@ -1978,6 +1979,20 @@ void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
u32 il_read_targ_mem(struct il_priv *il, u32 addr);
void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
+{
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command. If the packet (e.g. Rx frame)
+ * originated from uCode, there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
+ * apparently a few don't get set; catch them here.
+ */
+ return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
+ pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
+ pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
+}
+
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 3eb2102ce236..74b3b4de7bb7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+config IWLWIFI_BCAST_FILTERING
+ bool "Enable broadcast filtering"
+ depends on IWLMVM
+ help
+ Say Y here to enable default bcast filtering configuration.
+
+ Enabling broadcast filtering will drop any incoming wireless
+ broadcast frames, except some very specific predefined
+ patterns (e.g. incoming arp requests).
+
+ If unsure, don't enable this option, as some programs might
+ expect incoming broadcasts for their normal operations.
+
menu "Debugging Options"
depends on IWLWIFI
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
+
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1fa64429bcc2..3d32f4120174 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 562772d85102..c160dad03037 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -109,7 +109,7 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
struct iwl_ucode_capabilities;
-extern struct ieee80211_ops iwlagn_hw_ops;
+extern const struct ieee80211_ops iwlagn_hw_ops;
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
@@ -480,7 +480,7 @@ do { \
} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
-extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 7b140e487deb..758c54eeb206 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -317,7 +317,7 @@ static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.nrg_th_cca = 62,
};
-static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.min_nrg_cck = 95,
.auto_corr_min_ofdm = 90,
.auto_corr_min_ofdm_mrc = 170,
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 73086c1629ca..dd55c9cf7ba8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1582,7 +1582,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-struct ieee80211_ops iwlagn_hw_ops = {
+const struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ba1b1ea54252..6a6df71af1d7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_runtime_config);
+ mutex_lock(&priv->mutex);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
+ goto out;
/* dont send host command if rf-kill is on */
if (!iwl_is_ready_rf(priv))
- return;
+ goto out;
+
iwlagn_send_advance_bt_config(priv);
+out:
+ mutex_unlock(&priv->mutex);
}
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@@ -2035,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(priv->hw, skb);
}
-static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2045,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+
+ return false;
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 0977d93b529d..aa773a2da4ab 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
struct ieee80211_sta *sta, void *priv_sta)
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index bdd5644a400b..f6bd25cad203 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximun number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 7a1bc1c547e1..cd8377346aff 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
#define IWL_CMD_ENTRY(x) [x] = #x
-const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
IWL_CMD_ENTRY(REPLY_ALIVE),
IWL_CMD_ENTRY(REPLY_ERROR),
IWL_CMD_ENTRY(REPLY_ECHO),
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 2a59da2ff87a..003a546571d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,8 +71,8 @@
#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 7
-#define IWL3160_UCODE_API_OK 7
+#define IWL7260_UCODE_API_OK 8
+#define IWL3160_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
@@ -95,6 +95,8 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define NVM_HW_SECTION_NUM_FAMILY_7000 0
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -131,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -142,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@@ -152,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_n_cfg = {
@@ -162,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
+ .lp_xtal_workaround = true,
};
const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -194,6 +201,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
.host_interrupt_operation_mode = true,
};
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+ {.pwr = 1600, .backoff = 0},
+ {.pwr = 1300, .backoff = 467},
+ {.pwr = 900, .backoff = 1900},
+ {.pwr = 800, .backoff = 2630},
+ {.pwr = 700, .backoff = 3720},
+ {.pwr = 600, .backoff = 5550},
+ {.pwr = 500, .backoff = 9350},
+ {0},
+};
+
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
@@ -201,6 +219,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +229,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +239,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
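The iwl7265_pwr_tx_backoffs table added above is sorted by decreasing power limit and terminated by an all-zero entry. One plausible way a caller might translate a platform power limit into a backoff is sketched below — this lookup is purely illustrative and is not part of this hunk; the struct is redeclared with plain unsigned int so the example builds on its own.

#include <stdio.h>

struct iwl_pwr_tx_backoff {
	unsigned int pwr;	/* power limit in mW */
	unsigned int backoff;	/* tx backoff in usec */
};

/* Same shape as iwl7265_pwr_tx_backoffs: descending power, zero-terminated */
static const struct iwl_pwr_tx_backoff backoffs[] = {
	{ .pwr = 1600, .backoff = 0 },
	{ .pwr = 1300, .backoff = 467 },
	{ .pwr = 900,  .backoff = 1900 },
	{ .pwr = 800,  .backoff = 2630 },
	{ .pwr = 700,  .backoff = 3720 },
	{ .pwr = 600,  .backoff = 5550 },
	{ .pwr = 500,  .backoff = 9350 },
	{ 0 },
};

/* Hypothetical lookup: return the backoff of the largest power limit the
 * platform still meets; limits below the smallest entry fall through to 0,
 * which a real driver would have to treat as "table exhausted".
 */
static unsigned int pick_backoff(unsigned int platform_pwr_limit)
{
	const struct iwl_pwr_tx_backoff *e;

	for (e = backoffs; e->pwr; e++)
		if (platform_pwr_limit >= e->pwr)
			return e->backoff;
	return 0;
}

int main(void)
{
	printf("%u\n", pick_backoff(1000));	/* -> 1900 (900 mW entry) */
	return 0;
}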
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644
index 000000000000..f5bd82b88592
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX 8
+
+/* Oldest version we won't warn about */
+#define IWL8000_UCODE_API_OK 8
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN 8
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION 0x0a1d
+#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL8000_FW_PRE "iwlwifi-8000-"
+#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+
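As a hedged, standalone illustration (not part of the patch): the firmware name macro above stringifies the API number, so requesting API 8 resolves to "iwlwifi-8000-8.ucode".

#include <stdio.h>

/* two-level stringify, mirroring what linux/stringify.h provides */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL8000_FW_PRE "iwlwifi-8000-"
#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	puts(IWL8000_MODULE_FIRMWARE(8));	/* prints iwlwifi-8000-8.ucode */
	return 0;
}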
+#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+
+static const struct iwl_base_params iwl8000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_n_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 1ced525157dc..3f17dc3f2c8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -84,6 +84,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
+ IWL_DEVICE_FAMILY_8000,
};
/*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
bool enhanced_txpower;
};
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mW
+ * @backoff: The tx-backoff in usec
+ */
+struct iwl_pwr_tx_backoff {
+ u32 pwr;
+ u32 backoff;
+};
+
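A hedged sketch of how a table of such entries (as referenced by the new @pwr_tx_backoffs field) might be consumed; the ordering, values and helper below are assumptions made for illustration only, they are not taken from the patch.

static const struct iwl_pwr_tx_backoff example_pwr_tx_backoffs[] = {
	/* invented values, sorted by descending power limit */
	{ .pwr = 1600, .backoff = 0 },
	{ .pwr = 1300, .backoff = 467 },
	{ .pwr = 900, .backoff = 1900 },
	{} /* zeroed sentinel terminates the table */
};

static u32 example_get_tx_backoff(u32 pwr_limit_mw)
{
	const struct iwl_pwr_tx_backoff *entry;

	/* pick the first entry whose limit the platform power satisfies */
	for (entry = example_pwr_tx_backoffs; entry->pwr; entry++)
		if (pwr_limit_mw >= entry->pwr)
			return entry->backoff;

	/* below every listed limit: apply no backoff in this sketch */
	return 0;
}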
/**
* struct iwl_cfg
* @name: Official name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
* @high_temp: Whether this NIC is designated to operate at high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
+ * @d0i3: device uses d0i3 instead of d3
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,10 @@ struct iwl_cfg {
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
+ bool d0i3;
+ u8 nvm_hw_section_num;
+ bool lp_xtal_workaround;
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
/*
@@ -307,6 +324,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8260_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 9d325516c42d..fe129c94ae3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -139,6 +139,13 @@
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
/*
+ * CSR HW resources monitor registers
+ */
+#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
+#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
+#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
+
+/*
* CSR Hardware Revision Workaround Register. Indicates hardware rev;
* "step" determines CCK backoff for txpower calculation. Used for 4965 only.
* See also CSR_HW_REV register.
@@ -173,6 +180,7 @@
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -240,6 +248,7 @@
* 001 -- MAC power-down
* 010 -- PHY (radio) power-down
* 011 -- Error
+ * 10: XTAL ON request
* 9-6: SYS_CONFIG
* Indicates current system configuration, reflecting pins on chip
* as forced high/low by device circuit board.
@@ -271,6 +280,7 @@
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
@@ -395,37 +405,33 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
-/* SECURE boot registers */
-#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
-enum secure_boot_config_reg {
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
-};
-
-#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
-#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
-enum secure_boot_status_reg {
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
- CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
-};
-
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
-enum secure_load_status_reg {
- CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
- CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-
-#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
-
-#define CSR_SECURE_TIME_OUT (100)
+/*
+ * SHR target access (Shared block memory space)
+ *
+ * Shared internal registers can be accessed directly from PCI bus through SHR
+ * arbiter without need for the MAC HW to be powered up. This is possible due to
+ * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
+ * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
+ *
+ * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW
+ * need not be powered up, so no "grab nic access" is required.
+ */
-#define FH_TCSR_0_REG0 (0x1D00)
+/*
+ * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
+ * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
+ * first, write to the control register:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
+ * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
+ *
+ * To write the register, first, write to the data register
+ * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
+ */
+#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
+#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
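A hedged sketch of the indirect access sequence described above, using the iwl_write32()/iwl_read32() family the comment points to; the wrapper functions themselves are illustrative and not part of the patch.

static u32 example_shr_read(struct iwl_trans *trans, u32 shr_addr)
{
	/* [15:0] = SHR register offset, [29:28] = 2 selects a read access */
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    (shr_addr & 0xffff) | (2 << 28));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void example_shr_write(struct iwl_trans *trans, u32 shr_addr, u32 val)
{
	/* write the data word first, then trigger the access with [29:28] = 3 */
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    (shr_addr & 0xffff) | (3 << 28));
}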
/*
* HBUS (Host-side Bus)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a75aac986a23..c8cbdbe15924 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -126,6 +126,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_RPM 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -189,5 +190,6 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 75103554cd63..0a3e841b44a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -128,7 +128,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[25]; /* name of firmware file to load */
+ char firmware_name[32]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+ name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
@@ -403,6 +404,38 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
return 0;
}
+static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
+ struct iwl_ucode_capabilities *capa)
+{
+ const struct iwl_ucode_api *ucode_api = (void *)data;
+ u32 api_index = le32_to_cpu(ucode_api->api_index);
+
+ if (api_index >= IWL_API_ARRAY_SIZE) {
+ IWL_ERR(drv, "api_index larger than supported by driver\n");
+ return -EINVAL;
+ }
+
+ capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+
+ return 0;
+}
+
+static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
+ struct iwl_ucode_capabilities *capa)
+{
+ const struct iwl_ucode_capa *ucode_capa = (void *)data;
+ u32 api_index = le32_to_cpu(ucode_capa->api_index);
+
+ if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+ IWL_ERR(drv, "api_index larger than supported by driver\n");
+ return -EINVAL;
+ }
+
+ capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+
+ return 0;
+}
+
static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces)
@@ -637,6 +670,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
*/
capa->flags = le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_API_CHANGES_SET:
+ if (tlv_len != sizeof(struct iwl_ucode_api))
+ goto invalid_tlv_len;
+ if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
+ goto tlv_error;
+ break;
+ case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
+ if (tlv_len != sizeof(struct iwl_ucode_capa))
+ goto invalid_tlv_len;
+ if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
+ goto tlv_error;
+ break;
case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
@@ -727,6 +772,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+ drv->fw.valid_tx_ant = (drv->fw.phy_config &
+ FW_PHY_CFG_TX_CHAIN) >>
+ FW_PHY_CFG_TX_CHAIN_POS;
+ drv->fw.valid_rx_ant = (drv->fw.phy_config &
+ FW_PHY_CFG_RX_CHAIN) >>
+ FW_PHY_CFG_RX_CHAIN_POS;
break;
case IWL_UCODE_TLV_SECURE_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -1300,8 +1351,7 @@ MODULE_PARM_DESC(antenna_coupling,
module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer 0=system default, "
- "1=disable, 2=enable (default: 0)");
+ "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 592c01e11013..3c72cb710b0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -70,6 +70,20 @@
#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
+#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
+#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
+#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
+#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
+#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
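A small standalone decode of a radio_cfg word with the family-8000 masks above; the sample value is invented, and the macros are repeated locally only so the demo compiles on its own.

#include <stdio.h>

#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x)	(x & 0xF)
#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x)	((x >> 4) & 0xF)
#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x)	((x >> 8) & 0xF)
#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x)	((x >> 12) & 0xFFF)
#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x)	((x >> 24) & 0xF)
#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x)	((x >> 28) & 0xF)

int main(void)
{
	unsigned int radio_cfg = 0x12345678;	/* sample value only */

	/* prints: type=0x345 step=6 dash=7 flavor=8 tx_ant=0x2 rx_ant=0x1 */
	printf("type=0x%x step=%u dash=%u flavor=%u tx_ant=0x%x rx_ant=0x%x\n",
	       NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg),
	       NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg),
	       NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg),
	       NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg),
	       NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg),
	       NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg));
	return 0;
}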
/**
* DOC: Driver system flows - drv component
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index e3c7deafabe6..f0548b8a64b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -81,16 +81,17 @@ struct iwl_nvm_data {
bool sku_cap_band_24GHz_enable;
bool sku_cap_band_52GHz_enable;
bool sku_cap_11n_enable;
+ bool sku_cap_11ac_enable;
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
- u8 radio_cfg_type;
+ u16 radio_cfg_type;
u8 radio_cfg_step;
u8 radio_cfg_dash;
u8 radio_cfg_pnum;
u8 valid_tx_ant, valid_rx_ant;
- u16 nvm_version;
+ u32 nvm_version;
s8 max_tx_pwr_half_dbm;
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 88e2d6eb569f..b45e576a4b57 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -126,6 +126,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
IWL_UCODE_TLV_NUM_OF_CPU = 27,
IWL_UCODE_TLV_CSCHEME = 28,
+ IWL_UCODE_TLV_API_CHANGES_SET = 29,
+ IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
};
struct iwl_ucode_tlv {
@@ -158,4 +160,19 @@ struct iwl_tlv_ucode_header {
u8 data[0];
};
+/*
+ * ucode TLVs
+ *
+ * ability to get extensions for flags & capabilities from the ucode binary files
+ */
+struct iwl_ucode_api {
+ __le32 api_index;
+ __le32 api_flags;
+} __packed;
+
+struct iwl_ucode_capa {
+ __le32 api_index;
+ __le32 api_capa;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 5f1493c44097..d14f19339d61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -92,9 +92,11 @@
* @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
* @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
* containing CAM (Continuous Active Mode) indication.
- * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
- * single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in different bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -116,9 +118,27 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
- IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
+};
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ */
+enum iwl_ucode_tlv_api {
+ IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+};
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ */
+enum iwl_ucode_tlv_capa {
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
};
/* The default calibrate table size if not specified by firmware file */
@@ -160,13 +180,16 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 6
-#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
+#define IWL_UCODE_SECTION_MAX 12
+#define IWL_API_ARRAY_SIZE 1
+#define IWL_CAPABILITIES_ARRAY_SIZE 1
struct iwl_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
u32 flags;
+ u32 api[IWL_API_ARRAY_SIZE];
+ u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
};
/* one for each uCode image (inst/data, init/runtime/wowlan) */
@@ -285,22 +308,12 @@ struct iwl_fw {
struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
u32 phy_config;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
bool mvm_fw;
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
};
-static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
-{
- return (fw->phy_config & FW_PHY_CFG_TX_CHAIN) >>
- FW_PHY_CFG_TX_CHAIN_POS;
-}
-
-static inline u8 iwl_fw_valid_rx_ant(const struct iwl_fw *fw)
-{
- return (fw->phy_config & FW_PHY_CFG_RX_CHAIN) >>
- FW_PHY_CFG_RX_CHAIN_POS;
-}
-
#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index f98175a0d35b..44cc3cf45762 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
}
IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
-static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read_prph(trans, ofs);
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
-static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
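A hedged usage sketch for the new helper: it returns the elapsed time in usec once the masked bits match, or -ETIMEDOUT. The EXAMPLE_* names below are placeholders invented for illustration, not registers from this patch.

static void example_wait_for_status(struct iwl_trans *trans)
{
	int ret = iwl_poll_prph_bit(trans, EXAMPLE_PRPH_STATUS_REG,
				    EXAMPLE_STATUS_DONE_BIT,
				    EXAMPLE_STATUS_DONE_BIT, 5000);

	if (ret < 0)
		IWL_ERR(trans, "status bit polling timed out\n");
}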
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index c339c1bed080..665ddd9dbbc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -70,8 +70,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index b29075c3da8e..d994317db85b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -96,7 +96,7 @@ enum iwl_disable_11n {
* use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
- * @wd_disable: enable stuck queue check, default = 0
+ * @wd_disable: disable stuck queue check, default = 1
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @power_save: disable power save, default = false
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 725e954d8475..6be30c698506 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -71,7 +71,7 @@ enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
HW_ADDR = 0x15,
-/* NVM SW-Section offset (in words) definitions */
+ /* NVM SW-Section offset (in words) definitions */
NVM_SW_SECTION = 0x1C0,
NVM_VERSION = 0,
RADIO_CFG = 1,
@@ -79,11 +79,32 @@ enum wkp_nvm_offsets {
N_HW_ADDRS = 3,
NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
-/* NVM calibration section offset (in words) definitions */
+ /* NVM calibration section offset (in words) definitions */
NVM_CALIB_SECTION = 0x2B8,
XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
};
+enum family_8000_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ HW_ADDR0_FAMILY_8000 = 0x12,
+ HW_ADDR1_FAMILY_8000 = 0x16,
+ MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
+
+ /* NVM SW-Section offset (in words) definitions */
+ NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
+ NVM_VERSION_FAMILY_8000 = 0,
+ RADIO_CFG_FAMILY_8000 = 2,
+ SKU_FAMILY_8000 = 4,
+ N_HW_ADDRS_FAMILY_8000 = 5,
+
+ /* NVM REGULATORY-Section offset (in words) definitions */
+ NVM_CHANNELS_FAMILY_8000 = 0,
+
+ /* NVM calibration section offset (in words) definitions */
+ NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
+ XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
+};
+
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
NVM_SKU_CAP_BAND_24GHZ = BIT(0),
@@ -92,14 +113,6 @@ enum nvm_sku_bits {
NVM_SKU_CAP_11AC_ENABLE = BIT(3),
};
-/* radio config bits (actual values from NVM definition) */
-#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
-#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
/*
* These are the channel numbers in the order that they are stored in the NVM
*/
@@ -112,7 +125,17 @@ static const u8 iwl_nvm_channels[] = {
149, 153, 157, 161, 165
};
+static const u8 iwl_nvm_channels_family_8000[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
#define NUM_2GHZ_CHANNELS 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
@@ -179,8 +202,18 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
+ int num_of_ch;
+ const u8 *nvm_chan;
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ num_of_ch = IWL_NUM_CHANNELS;
+ nvm_chan = &iwl_nvm_channels[0];
+ } else {
+ num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
+ nvm_chan = &iwl_nvm_channels_family_8000[0];
+ }
- for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
if (ch_idx >= NUM_2GHZ_CHANNELS &&
@@ -190,7 +223,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
- iwl_nvm_channels[ch_idx],
+ nvm_chan[ch_idx],
ch_flags,
(ch_idx >= NUM_2GHZ_CHANNELS) ?
"5.2" : "2.4");
@@ -200,7 +233,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel = &data->channels[n_channels];
n_channels++;
- channel->hw_value = iwl_nvm_channels[ch_idx];
+ channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
@@ -211,11 +244,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = IEEE80211_CHAN_NO_HT40;
if (ch_idx < NUM_2GHZ_CHANNELS &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
- if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
- if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+ } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -266,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- struct ieee80211_sta_vht_cap *vht_cap)
+ struct ieee80211_sta_vht_cap *vht_cap,
+ u8 tx_chains, u8 rx_chains)
{
- int num_ants = num_of_ant(data->valid_rx_ant);
+ int num_rx_ants = num_of_ant(rx_chains);
+ int num_tx_ants = num_of_ant(tx_chains);
vht_cap->vht_supported = true;
@@ -278,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- if (num_ants > 1)
+ if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+ else
+ vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -294,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
- if (num_ants == 1 ||
- cfg->rx_with_siso_diversity) {
- vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
- IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
+ vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
vht_cap->vht_mcs.rx_mcs_map |=
cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
@@ -307,14 +342,23 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
}
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data, const __le16 *nvm_sw,
- bool enable_vht, u8 tx_chains, u8 rx_chains)
+ struct iwl_nvm_data *data,
+ const __le16 *ch_section, bool enable_vht,
+ u8 tx_chains, u8 rx_chains)
{
- int n_channels = iwl_init_channel_map(dev, cfg, data,
- &nvm_sw[NVM_CHANNELS]);
+ int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ n_channels = iwl_init_channel_map(
+ dev, cfg, data,
+ &ch_section[NVM_CHANNELS]);
+ else
+ n_channels = iwl_init_channel_map(
+ dev, cfg, data,
+ &ch_section[NVM_CHANNELS_FAMILY_8000]);
+
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -333,80 +377,160 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
if (enable_vht)
- iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
+ iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
+ tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
}
+static int iwl_get_sku(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + SKU);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
+}
+
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + NVM_VERSION);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw +
+ NVM_VERSION_FAMILY_8000));
+}
+
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + RADIO_CFG);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+}
+
+#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
+ const __le16 *nvm_sw)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+ else
+ return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
+ & N_HW_ADDRS_MASK_FAMILY_8000;
+}
+
+static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ u32 radio_cfg)
+{
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ return;
+ }
+
+ /* set the radio configuration for family 8000 */
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
+}
+
+static void iwl_set_hw_address(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 *nvm_sec)
+{
+ u8 hw_addr[ETH_ALEN];
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
+ else
+ memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
+ ETH_ALEN);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+}
+
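For clarity, a standalone worked example of the per-pair byte swap above (the sample bytes are invented): NVM bytes 11 22 33 44 55 66 become the MAC address 22:11:44:33:66:55.

#include <stdio.h>

int main(void)
{
	unsigned char nvm[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	unsigned char mac[6];
	int i;

	/* swap each 16-bit little-endian pair, as iwl_set_hw_address() does */
	for (i = 0; i < 6; i += 2) {
		mac[i] = nvm[i + 1];
		mac[i + 1] = nvm[i];
	}

	/* prints 22:11:44:33:66:55 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}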
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
+ const __le16 *nvm_calib, const __le16 *regulatory,
+ const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
{
struct iwl_nvm_data *data;
- u8 hw_addr[ETH_ALEN];
- u16 radio_cfg, sku;
-
- data = kzalloc(sizeof(*data) +
- sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
- GFP_KERNEL);
+ u32 sku;
+ u32 radio_cfg;
+
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) *
+ IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ else
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) *
+ IWL_NUM_CHANNELS_FAMILY_8000,
+ GFP_KERNEL);
if (!data)
return NULL;
- data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+ data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
- radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
- data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
- data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
- data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
- data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
- data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
- data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+ iwl_set_radio_cfg(cfg, data, radio_cfg);
- sku = le16_to_cpup(nvm_sw + SKU);
+ sku = iwl_get_sku(cfg, nvm_sw);
data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+ data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
- /* check overrides (some devices have wrong NVM) */
- if (cfg->valid_tx_ant)
- data->valid_tx_ant = cfg->valid_tx_ant;
- if (cfg->valid_rx_ant)
- data->valid_rx_ant = cfg->valid_rx_ant;
+ data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
- if (!data->valid_tx_ant || !data->valid_rx_ant) {
- IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
- data->valid_tx_ant, data->valid_rx_ant);
- kfree(data);
- return NULL;
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ /* Checking for required sections */
+ if (!nvm_calib) {
+ IWL_ERR_DEV(dev,
+ "Can't parse empty Calib NVM sections\n");
+ kfree(data);
+ return NULL;
+ }
+ /* in family 8000 Xtal calibration values moved to OTP */
+ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
}
- data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+ if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_set_hw_address(cfg, data, nvm_hw);
- data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+ iwl_init_sbands(dev, cfg, data, nvm_sw,
+ sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+ rx_chains);
+ } else {
+ /* MAC address in family 8000 */
+ iwl_set_hw_address(cfg, data, mac_override);
- /* The byte order is little endian 16 bit, meaning 214365 */
- memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
-
- iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
- tx_chains, rx_chains);
+ iwl_init_sbands(dev, cfg, data, regulatory,
+ sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+ rx_chains);
+ }
- data->calib_version = 255; /* TODO:
- this value will prevent some checks from
- failing, we need to check if this
- field is still needed, and if it does,
- where is it in the NVM*/
+ data->calib_version = 255;
return data;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 0c4399aba8c6..c9c45a39d212 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -75,6 +75,7 @@
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
- const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
+ const __le16 *nvm_calib, const __le16 *regulatory,
+ const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b5be51f3cd3d..ea29504ac617 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -119,7 +119,8 @@ struct iwl_cfg;
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. May sleep.
+ * the radio is killed. Return %true if the device should be stopped by
+ * the transport immediately after the call. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -131,6 +132,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -142,12 +145,14 @@ struct iwl_op_mode_ops {
struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
- void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
+ bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
+ int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+ int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +160,7 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
- * @ops - pointer to its own ops
+ * @ops: pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
*/
@@ -191,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
op_mode->ops->queue_not_full(op_mode, queue);
}
-static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
- bool state)
+static inline bool __must_check
+iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
might_sleep();
- op_mode->ops->hw_rf_kill(op_mode, state);
+ return op_mode->ops->hw_rf_kill(op_mode, state);
}
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
@@ -226,4 +231,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->enter_d0i3)
+ return 0;
+ return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->exit_d0i3)
+ return 0;
+ return op_mode->ops->exit_d0i3(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index fa77d63a277a..b761ac4822a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_PAPD_CH_GROUPS 7
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
- if (WARN_ON_ONCE(!entry->size))
+ if (!entry->size)
continue;
/* Send the requested PHY DB section */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 100bd0d79681..5f657c501406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -95,7 +95,8 @@
#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
-#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
@@ -105,6 +106,33 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
+#define SHR_BASE 0x00a10000
+
+/* Shared GP1 register */
+#define SHR_APMG_GP1_REG 0x01dc
+#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
+#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
+#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
+
+/* Shared DL_CFG register */
+#define SHR_APMG_DL_CFG_REG 0x01c4
+#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
+#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
+#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
+#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
+
+/* Shared APMG_XTAL_CFG register */
+#define SHR_APMG_XTAL_CFG_REG 0x1c0
+#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
+
+/*
+ * Device reset for family 8000
+ * Write to bit 24 in order to reset the CPU.
+ */
+#define RELEASE_CPU_RESET (0x300C)
+#define RELEASE_CPU_RESET_BIT BIT(24)
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -281,4 +309,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
+/* SECURE boot registers */
+#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
+#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
+enum secure_boot_status_reg {
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
+enum secure_load_status_reg {
+ LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
+ LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
+ LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
+ LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
+#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
+
+#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
+#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+
+#define LMPM_SECURE_TIME_OUT (100)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 1f065cf4a4ba..8cdb0dd618a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
+ * command queue, but after other high priority commands. Valid only
+ * with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ * (i.e. mark it as non-idle).
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
+ CMD_HIGH_PRIO = BIT(3),
+ CMD_SEND_IN_IDLE = BIT(4),
+ CMD_MAKE_TRANS_IDLE = BIT(5),
+ CMD_WAKE_UP_TRANS = BIT(6),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -335,6 +346,9 @@ enum iwl_d3_status {
* @STATUS_INT_ENABLED: interrupts are enabled
* @STATUS_RFKILL: the HW RFkill switch is in KILL position
* @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ * are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
STATUS_INT_ENABLED,
STATUS_RFKILL,
STATUS_FW_ERROR,
+ STATUS_TRANS_GOING_IDLE,
+ STATUS_TRANS_IDLE,
};
/**
@@ -377,7 +393,7 @@ struct iwl_trans_config {
bool rx_buf_size_8k;
bool bc_table_dword;
unsigned int queue_watchdog_timeout;
- const char **command_names;
+ const char *const *command_names;
};
struct iwl_trans;
@@ -443,6 +459,11 @@ struct iwl_trans;
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask - set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ * certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ * initially the reference count is 1, making an initial @unref
+ * necessary to allow low power states.
*/
struct iwl_trans_ops {
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
+ void (*ref)(struct iwl_trans *trans);
+ void (*unref)(struct iwl_trans *trans);
};
/**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
+ u64 dflt_pwr_limit;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test);
}
+static inline void iwl_trans_ref(struct iwl_trans *trans)
+{
+ if (trans->ops->ref)
+ trans->ops->ref(trans);
+}
+
+static inline void iwl_trans_unref(struct iwl_trans *trans)
+{
+ if (trans->ops->unref)
+ trans->ops->unref(trans);
+}
+
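The @ref/@unref pair acts as a reference count around low-power states (note the initial count of 1 mentioned in the ops documentation). A hedged usage sketch; the surrounding function is hypothetical, not part of the patch.

static void example_with_device_awake(struct iwl_trans *trans)
{
	iwl_trans_ref(trans);	/* keep the device out of low-power states */

	/* ... access the device or send commands here ... */

	iwl_trans_unref(trans);	/* allow low-power states again */
}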
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index f98ec2b23898..ccdd3b7c4cce 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o power_legacy.o bt-coex.o
-iwlmvm-y += led.o tt.o
+iwlmvm-y += power.o coex.o
+iwlmvm-y += led.o tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 18a895a949d4..685f7e8e6943 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -61,9 +61,11 @@
*
*****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#include "fw-api-bt-coex.h"
+#include "fw-api-coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"
@@ -305,6 +307,215 @@ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
cpu_to_le32(0x33113311),
};
+struct corunning_block_luts {
+ u8 range;
+ __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
+};
+
+/*
+ * Ranges for the antenna coupling calibration / co-running block LUT:
+ * LUT0: [ 0, 12[
+ * LUT1: [12, 20[
+ * LUT2: [20, 21[
+ * LUT3: [21, 23[
+ * LUT4: [23, 27[
+ * LUT5: [27, 30[
+ * LUT6: [30, 32[
+ * LUT7: [32, 33[
+ * LUT8: [33, - [
+ */
+static const struct corunning_block_luts antenna_coupling_ranges[] = {
+ {
+ .range = 0,
+ .lut20 = {
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 12,
+ .lut20 = {
+ cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 20,
+ .lut20 = {
+ cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 21,
+ .lut20 = {
+ cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 23,
+ .lut20 = {
+ cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 27,
+ .lut20 = {
+ cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 30,
+ .lut20 = {
+ cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 32,
+ .lut20 = {
+ cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+ {
+ .range = 33,
+ .lut20 = {
+ cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
+ },
+ },
+};
+
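A hedged sketch of how an antenna-coupling value could be mapped onto the ranges listed above; the helper name is invented for illustration and the real selection logic may differ.

static const struct corunning_block_luts *
example_pick_corun_lut(u8 antenna_coupling)
{
	int i;

	/* walk down from the widest range until the coupling value fits */
	for (i = ARRAY_SIZE(antenna_coupling_ranges) - 1; i > 0; i--)
		if (antenna_coupling >= antenna_coupling_ranges[i].range)
			break;

	return &antenna_coupling_ranges[i];
}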
static enum iwl_bt_coex_lut_type
iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
{
@@ -378,7 +589,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
bt_cmd->flags = cpu_to_le32(flags);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@@ -391,14 +601,26 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_LUT |
BT_VALID_WIFI_RX_SW_PRIO_BOOST |
BT_VALID_WIFI_TX_SW_PRIO_BOOST |
- BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40 |
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
BT_VALID_TXRX_MAX_FREQ_0 |
BT_VALID_SYNC_TO_SCO);
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
+ if (IWL_MVM_BT_COEX_CORUNNING) {
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
+ }
+
+ if (IWL_MVM_BT_COEX_MPLUT) {
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+ }
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -406,6 +628,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
sizeof(iwl_combined_lookup));
+ /* Take first Co-running block LUT to get started */
+ memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
+ sizeof(bt_cmd->bt4_corun_lut20));
+ memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
+ sizeof(bt_cmd->bt4_corun_lut40));
+
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
@@ -489,36 +717,26 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_DUP, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_ASYNC,
};
-
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int ret;
- if (sta_id == IWL_MVM_STATION_COUNT)
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
return 0;
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return 0;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower_dbg ||
+ mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -552,6 +770,7 @@ struct iwl_bt_iterator_data {
bool reduced_tx_power;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
};
static inline
@@ -577,72 +796,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
+ u32 bt_activity_grading;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /* default smps_mode for BSS / P2P client is AUTOMATIC */
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ data->num_bss_ifaces++;
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /*
+ * Count unassoc BSSes, relax SMPS constraints
+ * and disable reduced Tx Power
+ */
+ if (!vif->bss_conf.assoc) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->ap_sta_id,
+ false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /* default smps_mode for AP / GO is OFF */
+ smps_mode = IEEE80211_SMPS_OFF;
+ if (!mvmvif->ap_ibss_active) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ return;
+ }
+
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+ break;
+ default:
+ return;
+ }
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- /* ... and it is an associated STATION, relax constraints */
- if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- /* SoftAP / GO will always be primary */
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = vif->type == NL80211_IFTYPE_AP ?
+ IEEE80211_SMPS_OFF :
+ IEEE80211_SMPS_DYNAMIC;
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, data->notif->bt_status, bt_activity_grading,
+ smps_mode);
- /* the Ack / Cts kill mask must be default if AP / GO */
- data->reduced_tx_power = false;
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
- if (chanctx_conf == data->primary)
- return;
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
- /* downgrade the current primary no matter what its type is */
data->secondary = data->primary;
data->primary = chanctx_conf;
- return;
}
- data->num_bss_ifaces++;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- /* we are now a STA / P2P Client, and take associated ones only */
- if (!vif->bss_conf.assoc)
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is a low latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
return;
+ }
- /* STA / P2P Client, try to be primary if first vif */
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
if (!data->primary || data->primary == chanctx_conf)
data->primary = chanctx_conf;
else if (!data->secondary)
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (le32_to_cpu(data->notif->bt_activity_grading) >=
- BT_LOW_TRAFFIC)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status,
- data->notif->bt_activity_grading, smps_mode);
-
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-
/* don't reduce the Tx power if in loose scheme */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant) {
@@ -918,8 +1178,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
@@ -955,6 +1215,38 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (info->band != IEEE80211_BAND_2GHZ)
+ return 0;
+
+ if (unlikely(mvm->bt_tx_prio))
+ return mvm->bt_tx_prio - 1;
+
+ /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+ if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
+ ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
+ return 3;
+
+ switch (ac) {
+ case IEEE80211_AC_BE:
+ return 1;
+ case IEEE80211_AC_VO:
+ return 3;
+ case IEEE80211_AC_VI:
+ return 2;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
@@ -962,3 +1254,69 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
+
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *dev_cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+ u8 __maybe_unused lower_bound, upper_bound;
+ u8 lut;
+
+ struct iwl_bt_coex_cmd *bt_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
+ };
+
+ if (!IWL_MVM_BT_COEX_CORUNNING)
+ return 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (ant_isolation == mvm->last_ant_isol)
+ return 0;
+
+ for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
+ if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
+ break;
+
+ lower_bound = antenna_coupling_ranges[lut].range;
+
+ if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
+ upper_bound = antenna_coupling_ranges[lut + 1].range;
+ else
+ upper_bound = antenna_coupling_ranges[lut].range;
+
+ IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
+ ant_isolation, lower_bound, upper_bound, lut);
+
+ mvm->last_ant_isol = ant_isolation;
+
+ if (mvm->last_corun_lut == lut)
+ return 0;
+
+ mvm->last_corun_lut = lut;
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return 0;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+ BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
+
+ /* For the moment, use the same LUT for 20MHz and 40MHz */
+ memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
+ sizeof(bt_cmd->bt4_corun_lut20));
+
+ memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
+ sizeof(bt_cmd->bt4_corun_lut40));
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 036857698565..51685693af2e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -78,5 +78,9 @@
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
+#define IWL_MVM_BT_COEX_CORUNNING 1
+#define IWL_MVM_BT_COEX_MPLUT 1
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index f36a7ee0267f..e56f5a0edf85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
-static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- union {
- struct iwl_proto_offload_cmd_v1 v1;
- struct iwl_proto_offload_cmd_v2 v2;
- struct iwl_proto_offload_cmd_v3_small v3s;
- struct iwl_proto_offload_cmd_v3_large v3l;
- } cmd = {};
- struct iwl_host_cmd hcmd = {
- .id = PROT_OFFLOAD_CONFIG_CMD,
- .flags = CMD_SYNC,
- .data[0] = &cmd,
- .dataflags[0] = IWL_HCMD_DFL_DUP,
- };
- struct iwl_proto_offload_cmd_common *common;
- u32 enabled = 0, size;
- u32 capa_flags = mvm->fw->ucode_capa.flags;
-#if IS_ENABLED(CONFIG_IPV6)
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int i;
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
- capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
- struct iwl_ns_config *nsc;
- struct iwl_targ_addr *addrs;
- int n_nsc, n_addrs;
- int c;
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
- nsc = cmd.v3s.ns_config;
- n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
- addrs = cmd.v3s.targ_addrs;
- n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
- } else {
- nsc = cmd.v3l.ns_config;
- n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
- addrs = cmd.v3l.targ_addrs;
- n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
- }
-
- if (mvmvif->num_target_ipv6_addrs)
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
-
- /*
- * For each address we have (and that will fit) fill a target
- * address struct and combine for NS offload structs with the
- * solicited node addresses.
- */
- for (i = 0, c = 0;
- i < mvmvif->num_target_ipv6_addrs &&
- i < n_addrs && c < n_nsc; i++) {
- struct in6_addr solicited_addr;
- int j;
-
- addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
- &solicited_addr);
- for (j = 0; j < c; j++)
- if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
- &solicited_addr) == 0)
- break;
- if (j == c)
- c++;
- addrs[i].addr = mvmvif->target_ipv6_addrs[i];
- addrs[i].config_num = cpu_to_le32(j);
- nsc[j].dest_ipv6_addr = solicited_addr;
- memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
- }
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
- cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
- else
- cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
- if (mvmvif->num_target_ipv6_addrs) {
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
- memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
- sizeof(mvmvif->target_ipv6_addrs[0]));
-
- for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
- IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
- memcpy(cmd.v2.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.v2.target_ipv6_addr[i]));
- } else {
- if (mvmvif->num_target_ipv6_addrs) {
- enabled |= IWL_D3_PROTO_OFFLOAD_NS;
- memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
- sizeof(mvmvif->target_ipv6_addrs[0]));
-
- for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
- IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
- memcpy(cmd.v1.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.v1.target_ipv6_addr[i]));
- }
-#endif
-
- if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
- common = &cmd.v3s.common;
- size = sizeof(cmd.v3s);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
- common = &cmd.v3l.common;
- size = sizeof(cmd.v3l);
- } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
- common = &cmd.v2.common;
- size = sizeof(cmd.v2);
- } else {
- common = &cmd.v1.common;
- size = sizeof(cmd.v1);
- }
-
- if (vif->bss_conf.arp_addr_cnt) {
- enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
- common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
- memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
- }
-
- if (!enabled)
- return 0;
-
- common->enabled = cpu_to_le32(enabled);
-
- hcmd.len[0] = size;
- return iwl_mvm_send_cmd(mvm, &hcmd);
-}
-
enum iwl_mvm_tcp_packet_type {
MVM_TCP_TX_SYN,
MVM_TCP_RX_SYNACK,
@@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
quota_cmd.quotas[0].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
mvmvif->phy_ctxt->color));
- quota_cmd.quotas[0].quota = cpu_to_le32(100);
- quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+ quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
@@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
+static int
+iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
+ const struct iwl_wowlan_config_cmd_v3 *cmd)
+{
+ /* start only with the v2 part of the command */
+ u16 cmd_len = sizeof(cmd->common);
+
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
+ cmd_len = sizeof(*cmd);
+
+ return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+ cmd_len, cmd);
+}
+
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
struct iwl_d3_manager_config d3_cfg_cmd_data = {
@@ -961,9 +842,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.tkip = &tkip_cmd,
.use_tkip = false,
};
- int ret, i;
+ int ret;
int len __maybe_unused;
- u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
/*
@@ -980,8 +860,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- old_aux_sta_id = mvm->aux_sta.sta_id;
-
/* see if there's only a single BSS vif and it's associated */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1005,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
- /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+ /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
- wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ wowlan_config_cmd.common.is_11n_connection =
+ ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
goto out_noreset;
- wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
+ wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
- /*
- * For QoS counters, we store the one to use next, so subtract 0x10
- * since the uCode will add 0x10 *before* using the value while we
- * increment after using the value (i.e. store the next value to use).
- */
- for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- u16 seq = mvm_ap_sta->tid_data[i].seq_number;
- seq -= 0x10;
- wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
- }
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
if (wowlan->disconnect)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
@@ -1055,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
- wowlan_config_cmd.wakeup_filter |=
+ wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@@ -1067,16 +937,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
- /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1096,16 +956,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. As a result, we have to move
- * the auxiliary station to ID 1 so the ID 0 remains free for
- * the AP station for later.
- * We set the sta_id to 1 here, and reset it to its previous
- * value (that we stored above) later.
- */
- mvm->aux_sta.sta_id = 1;
-
ret = iwl_mvm_load_d3_fw(mvm);
if (ret)
goto out;
@@ -1173,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
- ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
- CMD_SYNC, sizeof(wowlan_config_cmd),
- &wowlan_config_cmd);
+ ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
if (ret)
goto out;
@@ -1183,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_send_proto_offload(mvm, vif);
+ ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
if (ret)
goto out;
@@ -1191,11 +1039,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto out;
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out;
@@ -1222,10 +1070,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- mvm->aux_sta.sta_id = old_aux_sta_id;
- mvm_ap_sta->sta_id = old_ap_sta_id;
- mvmvif->ap_sta_id = old_ap_sta_id;
-
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
out_noreset:
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 0e29cd83a06a..9b59e1d7ae71 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
int bufsz = sizeof(buf);
int pos;
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
ap_sta_id = mvmvif->ap_sta_id;
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
+ "ap_sta_id %d - reduced Tx power %d force %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower);
+ mvm_sta->bt_reduced_txpower,
+ mvm_sta->bt_reduced_txpower_dbg);
}
}
@@ -269,6 +293,41 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_sta *mvmsta;
+ bool reduced_tx_power;
+ int ret;
+
+ if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return -ENOTCONN;
+
+ if (strtobool(buf, &reduced_tx_power) != 0)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+ if (IS_ERR_OR_NULL(mvmsta)) {
+ mutex_unlock(&mvm->mutex);
+ return -ENOTCONN;
+ }
+
+ mvmsta->bt_reduced_txpower_dbg = false;
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ reduced_tx_power);
+ if (!ret)
+ mvmsta->bt_reduced_txpower_dbg = true;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? : count;
+}
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -403,9 +462,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -460,6 +519,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3];
+
+ buf[0] = mvmvif->low_latency ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +567,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -496,15 +592,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
+ iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 369d4c90e669..1b52deea6081 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -60,11 +60,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
+#include <linux/vmalloc.h>
+
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "debugfs.h"
+#include "fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -90,7 +93,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,19 +108,63 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ if (!mvmsta)
ret = -ENOENT;
else
- ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
- count;
+ ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
return ret;
}
+static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int ret;
+
+ if (!mvm)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (!mvm->fw_error_dump) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ file->private_data = mvm->fw_error_dump;
+ mvm->fw_error_dump = NULL;
+ kfree(mvm->fw_error_sram);
+ mvm->fw_error_sram = NULL;
+ mvm->fw_error_sram_len = 0;
+ ret = 0;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_fw_error_dump_file *dump_file = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ dump_file,
+ le32_to_cpu(dump_file->file_len));
+}
+
+static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -251,7 +298,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -351,6 +398,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "antenna isolation = %d CORUN LUT index = %d\n",
+ mvm->last_ant_isol, mvm->last_corun_lut);
mutex_unlock(&mvm->mutex);
@@ -393,6 +443,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 bt_tx_prio;
+
+ if (sscanf(buf, "%u", &bt_tx_prio) != 1)
+ return -EINVAL;
+ if (bt_tx_prio > 4)
+ return -EINVAL;
+
+ mvm->bt_tx_prio = bt_tx_prio;
+
+ return count;
+}
+
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
@@ -532,6 +598,80 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
}
#undef PRINT_STAT_LE32
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+ char __user *user_buf, size_t count,
+ loff_t *ppos,
+ struct iwl_mvm_frame_stats *stats)
+{
+ char *buff, *pos, *endpos;
+ int idx, i;
+ int ret;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_bh(&mvm->drv_stats_lock);
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos,
+ "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+ stats->legacy_frames,
+ stats->ht_frames,
+ stats->vht_frames);
+ pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+ stats->bw_20_frames,
+ stats->bw_40_frames,
+ stats->bw_80_frames);
+ pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+ stats->ngi_frames,
+ stats->sgi_frames);
+ pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+ stats->siso_frames,
+ stats->mimo2_frames);
+ pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+ stats->fail_frames,
+ stats->success_frames);
+ pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+ stats->agg_frames);
+ pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+ stats->ampdu_count);
+ pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+ stats->ampdu_count > 0 ?
+ (stats->agg_frames / stats->ampdu_count) : 0);
+
+ pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
+ idx = stats->last_frame_idx - 1;
+ for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+ idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+ if (stats->last_rates[idx] == 0)
+ continue;
+ pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+ (int)(ARRAY_SIZE(stats->last_rates) - i));
+ pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+ }
+ spin_unlock_bh(&mvm->drv_stats_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+ &mvm->drv_rx_stats);
+}
+
static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
@@ -592,7 +732,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return -EINVAL;
if (scan_rx_ant > ANT_ABC)
return -EINVAL;
- if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+ if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
return -EINVAL;
mvm->scan_rx_ant = scan_rx_ant;
@@ -600,6 +740,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ const struct iwl_fw_bcast_filter *filter;
+ char *buf;
+ int bufsz = 1024;
+ int i, j, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+ filter = &cmd.filters[i];
+
+ ADD_TEXT("Filter [%d]:\n", i);
+ ADD_TEXT("\tDiscard=%d\n", filter->discard);
+ ADD_TEXT("\tFrame Type: %s\n",
+ filter->frame_type ? "IPv4" : "Generic");
+
+ for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+ const struct iwl_fw_bcast_filter_attr *attr;
+
+ attr = &filter->attrs[j];
+ if (!attr->mask)
+ break;
+
+ ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+ j, attr->offset,
+ attr->offset_type ? "IP End" :
+ "Payload Start",
+ be32_to_cpu(attr->mask),
+ be32_to_cpu(attr->val),
+ le16_to_cpu(attr->reserved1));
+ }
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int pos, next_pos;
+ struct iwl_fw_bcast_filter filter = {};
+ struct iwl_bcast_filter_cmd cmd;
+ u32 filter_id, attr_id, mask, value;
+ int err = 0;
+
+ if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+ &filter.frame_type, &pos) != 3)
+ return -EINVAL;
+
+ if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+ filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+ return -EINVAL;
+
+ for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+ attr_id++) {
+ struct iwl_fw_bcast_filter_attr *attr =
+ &filter.attrs[attr_id];
+
+ if (pos >= count)
+ break;
+
+ if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+ &attr->offset, &attr->offset_type,
+ &mask, &value, &next_pos) != 4)
+ return -EINVAL;
+
+ attr->mask = cpu_to_be32(mask);
+ attr->val = cpu_to_be32(value);
+ if (mask)
+ filter.num_attrs++;
+
+ pos += next_pos;
+ }
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+ &filter, sizeof(filter));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ char *buf;
+ int bufsz = 1024;
+ int i, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+ const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+ ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+ i, mac->default_discard, mac->attached_filters);
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_bcast_filter_cmd cmd;
+ struct iwl_fw_bcast_mac mac = {};
+ u32 mac_id, attached_filters;
+ int err = 0;
+
+ if (!mvm->bcast_filters)
+ return -ENOENT;
+
+ if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+ &attached_filters) != 3)
+ return -EINVAL;
+
+ if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+ mac.default_discard > 1 ||
+ attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+ return -EINVAL;
+
+ mac.attached_filters = cpu_to_le16(attached_filters);
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+ &mac, sizeof(mac));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -658,15 +979,117 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
+#define PRINT_MVM_REF(ref) do { \
+ if (test_bit(ref, mvm->ref_bitmap)) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t(0x%lx) %s\n", \
+ BIT(ref), #ref); \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
+ mvm->ref_bitmap[0]);
+
+ PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ PRINT_MVM_REF(IWL_MVM_REF_USER);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long value;
+ int ret;
+ bool taken;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+ if (value == 1 && !taken)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+ else if (value == 0 && taken)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, mvm, \
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ if (!mvm->dbgfs_prph_reg_addr)
+ return -EINVAL;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+ mvm->dbgfs_prph_reg_addr,
+ iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 args;
+ u32 value;
+
+ args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+ /* if we only want to set the reg address - nothing more to do */
+ if (args == 1)
+ goto out;
+
+ /* otherwise, make sure we have both address and value */
+ if (args != 2)
+ return -EINVAL;
+
+ iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+out:
+ return count;
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -677,9 +1100,23 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+
+static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
+ .open = iwl_dbgfs_fw_error_dump_open,
+ .read = iwl_dbgfs_fw_error_dump_read,
+ .release = iwl_dbgfs_fw_error_dump_release,
+};
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,24 +1124,52 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
+ struct dentry *bcast_dir __maybe_unused;
char buf[100];
+ spin_lock_init(&mvm->drv_stats_lock);
+
mvm->debugfs_dir = dbgfs_dir;
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+ bcast_dir = debugfs_create_dir("bcast_filtering",
+ mvm->debugfs_dir);
+ if (!bcast_dir)
+ goto err;
+
+ if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
+ bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override))
+ goto err;
+
+ MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ }
+#endif
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 1b4e54d416b0..21877e5966a8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -70,37 +70,28 @@
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_CH_PRIMARY_EN:
- * @BT_CH_SECONDARY_EN:
- * @BT_NOTIF_COEX_OFF:
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE:
* @BT_COEX_2W:
* @BT_COEX_3W:
* @BT_COEX_NW:
- * @BT_USE_DEFAULTS:
- * @BT_SYNC_2_BT_DISABLE:
- * @BT_COEX_CORUNNING_TBL_EN:
+ * @BT_COEX_SYNC2SCO:
+ * @BT_COEX_CORUNNING:
+ * @BT_COEX_MPLUT:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
enum iwl_bt_coex_flags {
- BT_CH_PRIMARY_EN = BIT(0),
- BT_CH_SECONDARY_EN = BIT(1),
- BT_NOTIF_COEX_OFF = BIT(2),
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
- BT_USE_DEFAULTS = BIT(6),
- BT_SYNC_2_BT_DISABLE = BIT(7),
- BT_COEX_CORUNNING_TBL_EN = BIT(8),
- BT_COEX_MPLUT_TBL_EN = BIT(9),
- /* Bit 10 is reserved */
- BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
+ BT_COEX_SYNC2SCO = BIT(7),
+ BT_COEX_CORUNNING = BIT(8),
+ BT_COEX_MPLUT = BIT(9),
};
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 8415ff312d0e..10fcc1a79ebd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -231,11 +231,15 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
- /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
-struct iwl_wowlan_config_cmd {
+struct iwl_wowlan_config_cmd_v2 {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
@@ -243,6 +247,12 @@ struct iwl_wowlan_config_cmd {
u8 is_11n_connection;
} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
+struct iwl_wowlan_config_cmd_v3 {
+ struct iwl_wowlan_config_cmd_v2 common;
+ u8 offloading_tid;
+ u8 reserved[3];
+} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
+
/*
* WOWLAN_TSC_RSC_PARAMS
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 884c08725308..cbbcd8e284e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_D0I3 1024
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_energy_delta = \
- cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
- .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
- .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
- .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
- .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+#define IWL_BF_CMD_CONFIG(mode) \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 85057219cc43..39148b5bb332 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -257,7 +257,8 @@ enum {
/* Bit 17-18: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
-#define RATE_MCS_STBC_MSK (1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_HT_STBC_MSK (3 << RATE_MCS_STBC_POS)
+#define RATE_MCS_VHT_STBC_MSK (1 << RATE_MCS_STBC_POS)
/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
#define RATE_MCS_BF_POS 19
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 1b60fdff6a56..d63647867262 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
* @STA_SLEEP_STATE_AWAKE:
* @STA_SLEEP_STATE_PS_POLL:
* @STA_SLEEP_STATE_UAPSD:
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
*/
enum iwl_sta_sleep_flag {
- STA_SLEEP_STATE_AWAKE = 0,
- STA_SLEEP_STATE_PS_POLL = BIT(0),
- STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_MOREDATA = BIT(2),
};
/* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
- * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
+ * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
+ * VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
+ * It only differs from VER_6 by the "awake_acs" field that is
+ * reserved and ignored in VER_6.
*/
-struct iwl_mvm_add_sta_cmd_v6 {
+struct iwl_mvm_add_sta_cmd_v7 {
u8 add_modify;
- u8 reserved1;
+ u8 awake_acs;
__le16 tid_disable_tx;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
struct iwl_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ * short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+ __le32 remain_frame_count;
+ __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index b674c2a2b51c..8e122f3a7a74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -76,6 +76,8 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
+ * on old firmwares).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
* @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
* Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
@@ -107,6 +109,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_VHT_NDPA = BIT(8),
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
+ TX_CMD_FLG_BT_PRIO_POS = 11,
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 989d7dbdca6c..6e75b52588de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -70,7 +70,7 @@
#include "fw-api-mac.h"
#include "fw-api-power.h"
#include "fw-api-d3.h"
-#include "fw-api-bt-coex.h"
+#include "fw-api-coex.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@@ -95,6 +95,7 @@ enum {
/* PHY context commands */
PHY_CONTEXT_CMD = 0x8,
DBG_CFG = 0x9,
+ ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* station table */
ADD_STA_KEY = 0x17,
@@ -163,6 +164,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@@ -190,6 +192,7 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
/* D3 commands/notifications */
@@ -197,6 +200,7 @@ enum {
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
+ D0I3_END_CMD = 0xed,
/* for WoWLAN in particular */
WOWLAN_PATTERNS = 0xe0,
@@ -313,14 +317,12 @@ enum {
/* Section types for NVM_ACCESS_CMD */
enum {
- NVM_SECTION_TYPE_HW = 0,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_PAPD,
- NVM_SECTION_TYPE_BT,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
- NVM_SECTION_TYPE_POST_FCS_CALIB,
- NVM_NUM_OF_SECTIONS,
+ NVM_SECTION_TYPE_SW = 1,
+ NVM_SECTION_TYPE_REGULATORY = 3,
+ NVM_SECTION_TYPE_CALIBRATION = 4,
+ NVM_SECTION_TYPE_PRODUCTION = 5,
+ NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+ NVM_MAX_NUM_SECTIONS = 12,
};
/**
@@ -412,6 +414,35 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
+struct mvm_alive_resp_ver2 {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+ u8 umac_minor; /* UMAC version: minor */
+ u8 umac_major; /* UMAC version: major */
+ __le16 umac_id; /* UMAC version: id */
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -682,6 +713,7 @@ enum {
TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+ T2_V2_START_IMMEDIATELY = BIT(11),
TE_V2_NOTIF_MSK = 0xff,
@@ -1159,6 +1191,90 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - base for a filter attribute's offset
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ * start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+ BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+ BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset: starting offset of this pattern.
+ * @val: value to match - big endian (MSB is the first
+ * byte to match from offset pos).
+ * @mask: mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+ u8 offset_type;
+ u8 offset;
+ __le16 reserved1;
+ __be32 val;
+ __be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+ BCAST_FILTER_FRAME_TYPE_ALL = 0,
+ BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. A filter is considered matched
+ * only when all its attributes are matched (i.e. AND relationship).
+ */
+struct iwl_fw_bcast_filter {
+ u8 discard;
+ u8 frame_type;
+ u8 num_attrs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+ u8 default_discard;
+ u8 reserved1;
+ __le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+ u8 disable;
+ u8 max_bcast_filters;
+ u8 max_macs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+ struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
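The broadcast-filtering structures above compose as follows: iwl_bcast_filter_cmd carries an array of filters plus a per-MAC table, each filter holds up to MAX_BCAST_FILTER_ATTRS match attributes (ANDed together), and each MAC selects the filters it uses through the attached_filters bitmap. A minimal usage sketch, inferred from the field descriptions rather than taken from the driver code, that drops all broadcasts on MAC index 0 except ARP:

	struct iwl_bcast_filter_cmd cmd = {};

	cmd.max_bcast_filters = MAX_BCAST_FILTERS;
	cmd.max_macs = ARRAY_SIZE(cmd.macs);

	/* filter 0: pass frames whose LLC/SNAP ethertype is ARP (0x0806) */
	cmd.filters[0].discard = 0;
	cmd.filters[0].frame_type = BCAST_FILTER_FRAME_TYPE_ALL;
	cmd.filters[0].num_attrs = 1;
	cmd.filters[0].attrs[0].offset_type = BCAST_FILTER_OFFSET_PAYLOAD_START;
	cmd.filters[0].attrs[0].offset = 6;	/* skip the 6-byte LLC/SNAP header */
	cmd.filters[0].attrs[0].val = cpu_to_be32(0x08060000);
	cmd.filters[0].attrs[0].mask = cpu_to_be32(0xffff0000);

	/* MAC 0: discard broadcasts by default, except what filter 0 matches */
	cmd.macs[0].default_discard = 1;
	cmd.macs[0].attached_filters = cpu_to_le16(BIT(0));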
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
new file mode 100644
index 000000000000..58c8941c0d95
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+
+#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_SRAM: device SRAM contents
+ * @IWL_FW_ERROR_DUMP_REG: device register values
+ */
+enum iwl_fw_error_dump_type {
+ IWL_FW_ERROR_DUMP_SRAM = 0,
+ IWL_FW_ERROR_DUMP_REG = 1,
+
+ IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: %enum iwl_fw_error_dump_type
+ * @len: the length starting from %data - must be a multiple of 4.
+ * @data: the data itself, padded to a multiple of 4.
+ */
+struct iwl_fw_error_dump_data {
+ __le32 type;
+ __le32 len;
+ __u8 data[];
+} __packed __aligned(4);
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of the whole file, starting from %barker
+ * @data: array of %struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+ __le32 barker;
+ __le32 file_len;
+ u8 data[0];
+} __packed __aligned(4);
+
+#endif /* __fw_error_dump_h__ */
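The dump file defined above is a simple TLV container: a fixed header followed by back-to-back iwl_fw_error_dump_data records, each 4-byte aligned. A sketch of a consumer walking a buffer that is already known to hold at least file_len bytes (the helper name is made up; only the struct layout comes from this header):

	/* hypothetical consumer: iterate the TLV records of a dump buffer */
	static void example_walk_dump(const void *buf)
	{
		const struct iwl_fw_error_dump_file *file = buf;
		u32 file_len = le32_to_cpu(file->file_len);
		const u8 *pos = file->data;
		const u8 *end = (const u8 *)buf + file_len;

		if (le32_to_cpu(file->barker) != IWL_FW_ERROR_DUMP_BARKER)
			return;

		while (pos + sizeof(struct iwl_fw_error_dump_data) <= end) {
			const struct iwl_fw_error_dump_data *rec = (const void *)pos;

			/* rec->type says whether rec->data holds SRAM, registers, ... */
			pos += sizeof(*rec) + le32_to_cpu(rec->len);
		}
	}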
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c03d39541f9e..7ce20062f32d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -110,18 +110,48 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
-
- palive = (void *)pkt->data;
-
- mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-
- alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ struct mvm_alive_resp_ver2 *palive2;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+
+ mvm->support_umac_log = false;
+ mvm->error_event_table =
+ le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype, palive->flags);
+ } else {
+ palive2 = (void *)pkt->data;
+
+ mvm->error_event_table =
+ le32_to_cpu(palive2->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive2->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+ mvm->umac_error_event_table =
+ le32_to_cpu(palive2->error_info_addr);
+
+ alive_data->valid = le16_to_cpu(palive2->status) ==
+ IWL_ALIVE_STATUS_OK;
+ if (mvm->umac_error_event_table)
+ mvm->support_umac_log = true;
+
+ IWL_DEBUG_FW(mvm,
+ "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive2->status), palive2->ver_type,
+ palive2->ver_subtype, palive2->flags);
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ palive2->umac_major, palive2->umac_minor);
+ }
return true;
}
@@ -292,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
/* Send TX valid antennas before triggering calibrations */
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -328,8 +358,6 @@ out:
GFP_KERNEL);
if (!mvm->nvm_data)
return -ENOMEM;
- mvm->nvm_data->valid_rx_ant = 1;
- mvm->nvm_data->valid_tx_ant = 1;
mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
@@ -341,8 +369,6 @@ out:
return ret;
}
-#define UCODE_CALIB_TIMEOUT (2*HZ)
-
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -394,7 +420,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
@@ -439,10 +465,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_mvm_power_update_device_mode(mvm);
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+ ret = iwl_power_legacy_set_cam_mode(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
+ /* allow FW/transport low power modes if not during restart */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -466,7 +505,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+ ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 6b4ea6bf8ffe..e3b3cf4dbd77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
int ret;
switch (mode) {
+ case IWL_LED_BLINK:
+ IWL_ERR(mvm, "Blink LED mode not supported, using default\n");
case IWL_LED_DEFAULT:
case IWL_LED_RF_STATE:
mode = IWL_LED_RF_STATE;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index ba723d50939a..9ccec10bba16 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
/* Skip the interface for which we are trying to assign a tsf_id */
if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
switch (data->vif->type) {
case NL80211_IFTYPE_STATION:
/*
- * The new interface is client, so if the existing one
- * we're iterating is an AP, and both interfaces have the
- * same beacon interval, the same TSF should be used to
- * avoid drift between the new client and existing AP,
- * the existing AP will get drift updates from the new
- * client context in this case
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
+
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so in case both interfaces
- * have the same beacon interval, it should get drift
- * updates from an existing client or use the same
- * TSF as an existing GO. There's no drift between
- * TSFs internally but if they used different TSFs
- * then a new client MAC could update one of them
- * and cause drift that way.
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
*/
- if (vif->type == NL80211_IFTYPE_STATION ||
- vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
default:
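The rewritten check above folds the two directions (AP interval a multiple of the client's, or the other way around) into a single test: with min_bi the smaller of the two beacon intervals, (a - b) % min_bi == 0 holds exactly when the larger interval is an integer multiple of the smaller. For example, intervals of 100 and 300 TU give (300 - 100) % 100 == 0, so the TSF is shared, while 100 and 250 give 150 % 100 == 50, so it is not. A standalone restatement of the predicate (illustrative only, mirroring the logic above):

	/* true when one beacon interval evenly divides the other */
	static bool example_bi_compatible(u16 bi_a, u16 bi_b)
	{
		u16 min_bi = min(bi_a, bi_b);

		if (!min_bi)
			return false;
		return ((bi_a - bi_b) % min_bi) == 0;
	}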
@@ -936,7 +952,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
TX_CMD_FLG_TSF);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->mgmt_last_antenna_idx);
beacon_cmd.tx.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c35b8661b395..4dd9ff43b8b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -66,7 +66,9 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/if_arp.h>
#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
#include "iwl-op-mode.h"
@@ -128,6 +130,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values are used only internally by the driver,
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ * be the vif's IP address. If the vif does not have exactly one
+ * IP address (0, or more than 1), this attribute will
+ * be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ * the LSB bytes of the vif's MAC address.
+ */
+enum {
+ BC_FILTER_MAGIC_NONE = 0,
+ BC_FILTER_MAGIC_IP,
+ BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+ {
+ /* arp */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+ .attrs = {
+ {
+ /* frame type - arp, hw type - ethernet */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header),
+ .val = cpu_to_be32(0x08060001),
+ .mask = cpu_to_be32(0xffffffff),
+ },
+ {
+ /* arp dest ip */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header) + 2 +
+ sizeof(struct arphdr) +
+ ETH_ALEN + sizeof(__be32) +
+ ETH_ALEN,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+ },
+ },
+ },
+ {
+ /* dhcp offer bcast */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ .attrs = {
+ {
+ /* udp dest port - 68 (bootp client) */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = offsetof(struct udphdr, dest),
+ .val = cpu_to_be32(0x00440000),
+ .mask = cpu_to_be32(0xffff0000),
+ },
+ {
+ /* dhcp - lsb bytes of client hw address */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = 38,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+ },
+ },
+ },
+ /* last filter must be empty */
+ {},
+};
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+ WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+ WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_unref(mvm->trans);
+}
+
+static void
+iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+{
+ int i;
+
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+ return;
+
+ for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
+ if (ref == i)
+ continue;
+
+ IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
+ clear_bit(i, mvm->ref_bitmap);
+ iwl_trans_unref(mvm->trans);
+ }
+}
+
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
int i;
@@ -168,6 +281,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
hw->rate_control_algorithm = "iwl-mvm-rs";
/*
@@ -179,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -203,6 +319,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -246,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
- NL80211_FEATURE_P2P_GO_OPPPS |
- NL80211_FEATURE_LOW_PRIORITY_SCAN;
+ NL80211_FEATURE_P2P_GO_OPPPS;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -289,6 +407,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* assign default bcast filtering configuration */
+ mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -300,11 +423,55 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
+static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm_sta *mvmsta;
+ bool defer = false;
+
+ /*
+ * double check the IN_D0I3 flag both before and after
+ * taking the spinlock, in order to prevent taking
+ * the spinlock when not needed.
+ */
+ if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
+ return false;
+
+ spin_lock(&mvm->d0i3_tx_lock);
+ /*
+ * testing the flag again ensures the skb dequeue
+ * loop (on d0i3 exit) hasn't run yet.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+ mvmsta->sta_id != mvm->d0i3_ap_sta_id)
+ goto out;
+
+ __skb_queue_tail(&mvm->d0i3_tx, skb);
+ ieee80211_stop_queues(mvm->hw);
+
+ /* trigger wakeup */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
+
+ defer = true;
+out:
+ spin_unlock(&mvm->d0i3_tx_lock);
+ return defer;
+}
+
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +482,18 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
goto drop;
- if (control->sta) {
- if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
+ ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)))
+ sta = NULL;
+
+ if (sta) {
+ if (iwl_mvm_defer_tx(mvm, sta, skb))
+ return;
+ if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
}
@@ -354,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ bool tx_agg_ref = false;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -361,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(mvm->nvm_data->sku_cap_11n_enable))
return -EACCES;
+ /* return from D0i3 before starting a new Tx aggregation */
+ if (action == IEEE80211_AMPDU_TX_START) {
+ iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
+ tx_agg_ref = true;
+
+ /*
+ * wait synchronously until D0i3 exit to get the correct
+ * sequence number for the tid
+ */
+ if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+ !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
+ WARN_ON_ONCE(1);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+ return -EIO;
+ }
+ }
+
mutex_lock(&mvm->mutex);
switch (action) {
@@ -398,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
}
mutex_unlock(&mvm->mutex);
+ /*
+ * If the tid is marked as started, we won't use it for offloaded
+ * traffic on the next D0i3 entry. It's safe to unref.
+ */
+ if (tx_agg_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+
return ret;
}
@@ -422,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
+
+ iwl_mvm_fw_error_dump(mvm);
+
+ /* notify the userspace about the error we had */
+ kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
+#endif
+
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -434,6 +645,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -441,6 +653,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
}
@@ -470,11 +686,15 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ iwl_mvm_d0i3_enable_tx(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
+ /* allow transport/FW low power modes */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mutex_unlock(&mvm->mutex);
}
@@ -482,9 +702,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
mutex_lock(&mvm->mutex);
+
+ /* disallow low power states when the FW is down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
/* async_handlers_wk is now blocked */
/*
@@ -510,14 +735,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
-
- iwl_mvm_power_update_mode(mvm, vif);
-}
-
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
u16 i;
@@ -585,7 +802,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
- qmask);
+ qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
goto out_release;
@@ -599,10 +817,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- iwl_mvm_power_disable(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
+ if (ret)
+ goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
if (ret)
goto out_remove_mac;
@@ -661,11 +881,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -754,11 +969,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -876,6 +1087,156 @@ out:
*total_flags = 0;
}
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bcast_filter_cmd *cmd;
+ u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+ const struct iwl_fw_bcast_filter *in_filter,
+ struct iwl_fw_bcast_filter *out_filter)
+{
+ struct iwl_fw_bcast_filter_attr *attr;
+ int i;
+
+ memcpy(out_filter, in_filter, sizeof(*out_filter));
+
+ for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+ attr = &out_filter->attrs[i];
+
+ if (!attr->mask)
+ break;
+
+ switch (attr->reserved1) {
+ case cpu_to_le16(BC_FILTER_MAGIC_IP):
+ if (vif->bss_conf.arp_addr_cnt != 1) {
+ attr->mask = 0;
+ continue;
+ }
+
+ attr->val = vif->bss_conf.arp_addr_list[0];
+ break;
+ case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+ attr->val = *(__be32 *)&vif->addr[2];
+ break;
+ default:
+ break;
+ }
+ attr->reserved1 = 0;
+ out_filter->num_attrs++;
+ }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_bcast_filter_cmd *cmd = data->cmd;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_fw_bcast_mac *bcast_mac;
+ int i;
+
+ if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+ return;
+
+ bcast_mac = &cmd->macs[mvmvif->id];
+
+ /* enable filtering only for associated stations */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ bcast_mac->default_discard = 1;
+
+ /* copy all configured filters */
+ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+ /*
+ * Make sure we don't exceed our filters limit.
+ * if there is still a valid filter to be configured,
+ * be on the safe side and just allow bcast for this mac.
+ */
+ if (WARN_ON_ONCE(data->current_filter >=
+ ARRAY_SIZE(cmd->filters))) {
+ bcast_mac->default_discard = 0;
+ bcast_mac->attached_filters = 0;
+ break;
+ }
+
+ iwl_mvm_set_bcast_filter(vif,
+ &mvm->bcast_filters[i],
+ &cmd->filters[data->current_filter]);
+
+ /* skip current filter if it contains no attributes */
+ if (!cmd->filters[data->current_filter].num_attrs)
+ continue;
+
+ /* attach the filter to current mac */
+ bcast_mac->attached_filters |=
+ cpu_to_le16(BIT(data->current_filter));
+
+ data->current_filter++;
+ }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd)
+{
+ struct iwl_bcast_iter_data iter_data = {
+ .mvm = mvm,
+ .cmd = cmd,
+ };
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+ cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* use debugfs filters/macs if override is configured */
+ if (mvm->dbgfs_bcast_filtering.override) {
+ memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+ sizeof(cmd->filters));
+ memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+ sizeof(cmd->macs));
+ return true;
+ }
+#endif
+
+ /* if no filters are configured, do nothing */
+ if (!mvm->bcast_filters)
+ return false;
+
+ /* configure and attach these filters for each associated sta vif */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bcast_filter_iterator, &iter_data);
+
+ return true;
+}
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_filter_cmd cmd;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+ return 0;
+
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return 0;
+}
+#endif
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -928,6 +1289,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/*
* If update fails - SF might be running in associated
@@ -940,27 +1303,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
+
+ if (vif->p2p)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
}
iwl_mvm_recalc_multicast(mvm);
+ iwl_mvm_configure_bcast_filter(mvm, vif);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- /* Workaround for FW bug, otherwise FW disables device
- * power save upon disassociation
- */
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
- }
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
IEEE80211_SMPS_AUTOMATIC);
@@ -971,9 +1332,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -987,10 +1349,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
+
+ if (changes & BSS_CHANGED_ARP_FILTER) {
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ iwl_mvm_configure_bcast_filter(mvm, vif);
+ }
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1024,8 +1391,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
- mvmvif->ap_ibss_active = true;
-
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1036,8 +1401,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = true;
/* power updated needs to be done before quotas */
- mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1047,14 +1411,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
return 0;
out_quota_failed:
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1080,6 +1445,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1088,8 +1455,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1103,26 +1469,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
u32 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_HT |
- BSS_CHANGED_BANDWIDTH;
- int ret;
/* Changes will be applied when the AP/IBSS is started */
if (!mvmvif->ap_ibss_active)
return;
- if (changes & ht_change) {
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- }
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
- if (changes & BSS_CHANGED_BEACON) {
- if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
- IWL_WARN(mvm, "Failed updating beacon data\n");
- }
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1162,13 +1522,30 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- ret = iwl_mvm_scan_request(mvm, vif, req);
- else
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_SCHED:
+ ret = iwl_mvm_sched_scan_stop(mvm);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
+ goto out;
+ }
- mutex_unlock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
+ mutex_unlock(&mvm->mutex);
+ /* make sure to flush the Rx handler before the next scan arrives */
+ iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
@@ -1186,20 +1563,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid,
+ struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* TODO: how do we tell the fw to send frames for a specific TID */
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
- /*
- * The fw will send EOSP notification when the last frame will be
- * transmitted.
- */
- iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
}
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1209,11 +1598,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int tid;
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+ continue;
+ if (iwl_mvm_tid_queued(tid_data) == 0)
+ continue;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ }
+ spin_unlock_bh(&mvmsta->lock);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@@ -1304,12 +1707,14 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+ if (vif->bss_conf.dtim_period)
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1401,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- IWL_DEBUG_SCAN(mvm,
- "SCHED SCAN request during internal scan - abort\n");
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_OS:
+ IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
+ ret = iwl_mvm_cancel_scan(mvm);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * iwl_mvm_rx_scan_complete() will be called soon but will
+ * not reset the scan status as it won't be IWL_MVM_SCAN_OS
+ * any more since we queue the next scan immediately (below).
+ * We make sure it is called before the next scan starts by
+ * flushing the async-handlers work.
+ */
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
goto out;
}
@@ -1425,17 +1847,23 @@ err:
mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
+ /* make sure to flush the Rx handler before the next scan arrives */
+ iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
-static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
+ iwl_mvm_wait_for_async_handlers(mvm);
+
+ return ret;
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -1773,8 +2201,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* Power state must be updated before quotas,
* otherwise fw will complain.
*/
- mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -1791,8 +2218,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1824,8 +2250,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_binding_remove_vif(mvm, vif);
- mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
@@ -1892,8 +2317,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
+ return iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
}
return -EOPNOTSUPP;
@@ -1914,7 +2340,7 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-struct ieee80211_ops iwl_mvm_hw_ops = {
+const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
@@ -1932,6 +2358,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2b0ba1fc3c82..d564233a65da 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -91,9 +91,7 @@ enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_MCAST = 5,
};
-extern struct ieee80211_ops iwl_mvm_hw_ops;
-extern const struct iwl_mvm_power_ops pm_legacy_ops;
-extern const struct iwl_mvm_power_ops pm_mac_ops;
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
@@ -159,20 +157,6 @@ enum iwl_power_scheme {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
-struct iwl_mvm_power_ops {
- int (*power_update_mode)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
- int (*power_update_device_mode)(struct iwl_mvm *mvm);
- int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
- void (*power_update_binding)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool assign);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- char *buf, int bufsz);
-#endif
-};
-
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +223,19 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
+enum iwl_mvm_ref_type {
+ IWL_MVM_REF_UCODE_DOWN,
+ IWL_MVM_REF_SCAN,
+ IWL_MVM_REF_ROC,
+ IWL_MVM_REF_P2P_CLIENT,
+ IWL_MVM_REF_AP_IBSS,
+ IWL_MVM_REF_USER,
+ IWL_MVM_REF_TX,
+ IWL_MVM_REF_TX_AGG,
+
+ IWL_MVM_REF_COUNT,
+};
+
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +266,9 @@ struct iwl_mvm_vif_bf_data {
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
- * interface should get quota etc.
+ * interface should get quota etc.
+ * @low_latency: indicates that this interface is in low-latency mode
+ * (VMACLowLatencyMode)
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +284,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
bool monitor_active;
+ bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -319,13 +319,13 @@ struct iwl_mvm_vif {
bool seqno_valid;
u16 seqno;
+#endif
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
-#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_mvm *mvm;
@@ -333,14 +333,13 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
-
- bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -349,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
+extern const u8 tid_to_mac80211_ac[];
+
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
@@ -415,6 +416,7 @@ struct iwl_tt_params {
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is thermal throttling enabled dynamic_smps?
 * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
 * @throttle: Is thermal throttling active?
*/
@@ -422,10 +424,33 @@ struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
+ u32 min_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+ u32 legacy_frames;
+ u32 ht_frames;
+ u32 vht_frames;
+ u32 bw_20_frames;
+ u32 bw_40_frames;
+ u32 bw_80_frames;
+ u32 bw_160_frames;
+ u32 sgi_frames;
+ u32 ngi_frames;
+ u32 siso_frames;
+ u32 mimo2_frames;
+ u32 agg_frames;
+ u32 ampdu_count;
+ u32 success_frames;
+ u32 fail_frames;
+ u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+ int last_frame_idx;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -457,6 +482,8 @@ struct iwl_mvm {
bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
+ u32 umac_error_event_table;
+ bool support_umac_log;
u32 ampdu_ref;
@@ -470,7 +497,7 @@ struct iwl_mvm {
struct iwl_nvm_data *nvm_data;
/* NVM sections */
- struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +521,17 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* broadcast filters to configure for each associated station */
+ const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ u32 override; /* u32 for debugfs_create_bool */
+ struct iwl_bcast_filter_cmd cmd;
+ } dbgfs_bcast_filtering;
+#endif
+#endif
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -506,6 +544,7 @@ struct iwl_mvm {
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
+ u32 dbgfs_prph_reg_addr;
bool disable_power_off;
bool disable_power_off_d3;
@@ -513,6 +552,9 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_sw_blob;
struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob;
+
+ struct iwl_mvm_frame_stats drv_rx_stats;
+ spinlock_t drv_stats_lock;
#endif
struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -526,10 +568,16 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ /* A bitmap of reference types taken by the driver. */
+ unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
+ void *fw_error_dump;
+ void *fw_error_sram;
+ u32 fw_error_sram_len;
struct led_classdev led;
@@ -548,17 +596,27 @@ struct iwl_mvm {
#endif
#endif
+ /* d0i3 */
+ u8 d0i3_ap_sta_id;
+ bool d0i3_offloading;
+ struct work_struct d0i3_exit_work;
+ struct sk_buff_head d0i3_tx;
+ /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+ spinlock_t d0i3_tx_lock;
+ wait_queue_head_t d0i3_exit_waitq;
+
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+ u32 last_ant_isol;
+ u8 last_corun_lut;
+ u8 bt_tx_prio;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
- const struct iwl_mvm_power_ops *pm_ops;
-
#ifdef CONFIG_NL80211_TESTMODE
u32 noa_duration;
struct ieee80211_vif *noa_vif;
@@ -569,10 +627,10 @@ struct iwl_mvm {
u8 first_agg_queue;
u8 last_agg_queue;
- u8 bound_vif_cnt;
-
/* Indicate if device power save is allowed */
- bool ps_prevented;
+ bool ps_disabled;
+ /* Indicate if device power management is allowed */
+ bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@@ -587,6 +645,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
IWL_MVM_STATUS_IN_HW_RESTART,
+ IWL_MVM_STATUS_IN_D0I3,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -595,6 +654,30 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
+{
+ return mvm->trans->cfg->d0i3 &&
+ (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -619,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -645,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+ flush_work(&mvm->async_handlers_wk);
+}
+
/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -661,6 +752,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -676,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
@@ -730,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -744,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -772,49 +868,24 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats,
+ u32 rate, bool agg);
+int rs_pretty_print_rate(char *buf, const u32 rate);
-/* power managment */
-static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_update_mode(mvm, vif);
-}
-
-static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_disable(mvm, vif);
-}
-
-static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
-{
- if (mvm->pm_ops->power_update_device_mode)
- return mvm->pm_ops->power_update_device_mode(mvm);
- return 0;
-}
+/* power management */
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
-{
- if (mvm->pm_ops->power_update_binding)
- mvm->pm_ops->power_update_binding(mvm, vif, assign);
-}
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- char *buf, int bufsz)
-{
- return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
-}
-#endif
-
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -840,6 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v2 *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ u32 cmd_flags);
+
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -850,10 +932,13 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac);
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -875,25 +960,53 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{}
#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+	 * interfaces aren't active, they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+
+ return mvmvif->low_latency;
+}
+
/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 35b71af78d02..cf2d09f53782 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -67,14 +67,6 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
-/* list of NVM sections we are allowed/need to read */
-static const int nvm_to_read[] = {
- NVM_SECTION_TYPE_HW,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
-};
-
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -236,24 +228,39 @@ static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
struct iwl_nvm_section *sections = mvm->nvm_sections;
- const __le16 *hw, *sw, *calib;
+ const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
/* Checking for required sections */
- if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
- IWL_ERR(mvm, "Can't parse empty NVM sections\n");
- return NULL;
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+ IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+ return NULL;
+ }
+ } else {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
+ !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+ IWL_ERR(mvm,
+ "Can't parse empty family 8000 NVM sections\n");
+ return NULL;
+ }
}
if (WARN_ON(!mvm->cfg))
return NULL;
- hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+ mac_override =
+ (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
- iwl_fw_valid_tx_ant(mvm->fw),
- iwl_fw_valid_rx_ant(mvm->fw));
+ regulatory, mac_override,
+ mvm->fw->valid_tx_ant,
+ mvm->fw->valid_rx_ant);
}
#define MAX_NVM_FILE_LEN 16384
@@ -293,6 +300,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
+#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
+#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
@@ -343,8 +352,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
- section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ section_size =
+ 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+ section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+ } else {
+ section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
+ le16_to_cpu(file_sec->word2));
+ section_id = NVM_WORD1_ID_FAMILY_8000(
+ le16_to_cpu(file_sec->word1));
+ }
if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
IWL_ERR(mvm, "ERROR - section too large (%d)\n",
@@ -367,7 +384,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
@@ -414,6 +431,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
{
int ret, i, section;
u8 *nvm_buffer, *temp;
+ int nvm_to_read[NVM_MAX_NUM_SECTIONS];
+ int num_of_sections_to_read;
+
+ if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
@@ -422,6 +444,22 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret)
return ret;
} else {
+ /* list of NVM sections we are allowed/need to read */
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
+ nvm_to_read[1] = NVM_SECTION_TYPE_SW;
+ nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
+ nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
+ num_of_sections_to_read = 4;
+ } else {
+ nvm_to_read[0] = NVM_SECTION_TYPE_SW;
+ nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
+ nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
+ nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
+ nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
+ num_of_sections_to_read = 5;
+ }
+
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
@@ -430,7 +468,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+ for (i = 0; i < num_of_sections_to_read; i++) {
section = nvm_to_read[i];
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
@@ -446,10 +484,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
#ifdef CONFIG_IWLWIFI_DEBUGFS
switch (section) {
- case NVM_SECTION_TYPE_HW:
- mvm->nvm_hw_blob.data = temp;
- mvm->nvm_hw_blob.size = ret;
- break;
case NVM_SECTION_TYPE_SW:
mvm->nvm_sw_blob.data = temp;
mvm->nvm_sw_blob.size = ret;
@@ -463,6 +497,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_prod_blob.size = ret;
break;
default:
+ if (section == mvm->cfg->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
WARN(1, "section: %d", section);
}
#endif
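The two header-word layouts decoded in iwl_mvm_read_external_nvm() above differ between pre-8000 and family-8000 devices. Below is a minimal standalone sketch of that decoding (plain C, not driver code; the function name and the bool flag are illustrative only), assuming word1/word2 have already been converted from little-endian by the caller, exactly mirroring the NVM_WORD* macros added above:

#include <stdbool.h>
#include <stdint.h>

static void nvm_decode_section_header(uint16_t word1, uint16_t word2,
				      bool family_8000,
				      unsigned int *section_size,
				      unsigned int *section_id)
{
	if (!family_8000) {
		/* pre-8000: length in word1 (16-byte units), ID in word2 >> 12 */
		*section_size = 2 * (8 * (word1 & 0x03FF));
		*section_id = word2 >> 12;
	} else {
		/* family 8000: byte-swapped length in word2, ID in word1 >> 4 */
		*section_size = 2 * (2 * (((word2 & 0xFF) << 8) | (word2 >> 8)));
		*section_id = word1 >> 4;
	}
}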
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000000..9bfb95e89cfb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c
@@ -0,0 +1,215 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v2 *cmd)
+{
+ int i;
+
+ /*
+ * For QoS counters, we store the one to use next, so subtract 0x10
+ * since the uCode will add 0x10 *before* using the value while we
+ * increment after using the value (i.e. store the next value to use).
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ cmd->qos_seq[i] = cpu_to_le16(seq);
+ }
+}
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ u32 cmd_flags)
+{
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v3_large v3l;
+ } cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = cmd_flags,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v3l.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v3l.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ if (mvmvif->num_target_ipv6_addrs)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+ /*
+ * For each address we have (and that will fit) fill a target
+ * address struct and combine for NS offload structs with the
+ * solicited node addresses.
+ */
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+ else
+ cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+ } else {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+ }
+#endif
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v3l.common;
+ size = sizeof(cmd.v3l);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
+ if (vif->bss_conf.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+ common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (!disable_offloading)
+ common->enabled = cpu_to_le32(enabled);
+
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
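iwl_mvm_set_wowlan_qos_seq() above stores seq_number - 0x10 because the driver tracks the next sequence number to use while the firmware tracks the last one used; the D0i3 exit path in ops.c below adds 0x10 back for the same reason. A minimal sketch of that round-trip (illustrative helpers, not driver code), where 0x10 is one sequence-number step since the low four bits of the 802.11 sequence-control field carry the fragment number:

#include <stdint.h>

/* driver -> firmware: hand over the last-used value */
static uint16_t driver_seq_to_fw(uint16_t next_to_use)
{
	return next_to_use - 0x10;
}

/* firmware -> driver: store the next value to use */
static uint16_t fw_seq_to_driver(uint16_t last_used)
{
	return last_used + 0x10;
}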
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a3d43de342d7..9545d7fdd4bf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <net/mac80211.h>
#include "iwl-notif-wait.h"
@@ -78,6 +79,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
+#include "fw-error-dump.h"
#include "time-event.h"
/*
@@ -185,9 +187,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
struct iwl_rx_handlers {
@@ -219,13 +222,17 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+ RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
+ iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
- RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, false),
+ iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
false),
@@ -242,7 +249,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
#undef RX_HANDLER
#define CMD(x) [x] = #x
-static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MVM_ALIVE),
CMD(REPLY_ERROR),
CMD(INIT_COMPLETE_NOTIF),
@@ -284,9 +291,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
+ CMD(D0I3_END_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
CMD(OFFLOADS_QUERY_CMD),
CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,17 +318,37 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(BCAST_FILTER_CMD),
CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ CMD(ANTENNA_COUPLING_NOTIFICATION),
};
#undef CMD
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+
+ if (!pwr_tx_backoff)
+ return 0;
+
+ while (pwr_tx_backoff->pwr) {
+ if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
+ return pwr_tx_backoff->backoff;
+
+ pwr_tx_backoff++;
+ }
+
+ return 0;
+}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@@ -333,6 +362,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
TX_CMD,
};
int err, scan_size;
+ u32 min_backoff;
+
+ /*
+ * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
/********************************
* 1. Allocating and configuring HW data
@@ -373,6 +410,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+ INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+
+ spin_lock_init(&mvm->d0i3_tx_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
@@ -421,7 +463,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- iwl_mvm_tt_initialize(mvm);
+ min_backoff = calc_min_backoff(trans, cfg);
+ iwl_mvm_tt_initialize(mvm, min_backoff);
/*
* If the NVM exists in an external file,
@@ -462,13 +505,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
- mvm->pm_ops = &pm_mac_ops;
- else
- mvm->pm_ops = &pm_legacy_ops;
-
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+ /* rpm starts with a taken ref. only set the appropriate bit here. */
+ set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+
return op_mode;
out_unregister:
@@ -495,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
+ vfree(mvm->fw_error_dump);
+ kfree(mvm->fw_error_sram);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@@ -508,7 +551,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
iwl_free_nvm_data(mvm->nvm_data);
- for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
ieee80211_free_hw(mvm->hw);
@@ -658,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
-static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -667,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- if (state && mvm->cur_ucode != IWL_UCODE_INIT)
- iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+
+ return state && mvm->cur_ucode != IWL_UCODE_INIT;
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@@ -703,6 +746,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
iwl_abort_notification_waits(&mvm->notif_wait);
/*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted scan when that was already done which
+ * is not a problem. It is necessary to abort any os scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211. If restart isn't called
+ * (no fw restart) scan status will stay busy.
+ */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ case IWL_MVM_SCAN_SCHED:
+ /* Sched scan will be restarted by mac80211 in restart_hw. */
+ if (!mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
+ }
+
+ /*
* If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
@@ -733,25 +799,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
- /*
- * This is a bit racy, but worst case we tell mac80211 about
- * a stopped/aborted (sched) scan when that was already done
- * which is not a problem. It is necessary to abort any scan
- * here because mac80211 requires having the scan cleared
- * before restarting.
- * We'll reset the scan_status to NONE in restart cleanup in
- * the next start() call from mac80211.
- */
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
- ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+ /* don't let the transport/FW power down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0)
mvm->restart_fw--;
@@ -759,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
}
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
+{
+ struct iwl_fw_error_dump_file *dump_file;
+ struct iwl_fw_error_dump_data *dump_data;
+ u32 file_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->fw_error_dump)
+ return;
+
+ file_len = mvm->fw_error_sram_len +
+ sizeof(*dump_file) +
+ sizeof(*dump_data);
+
+ dump_file = vmalloc(file_len);
+ if (!dump_file)
+ return;
+
+ mvm->fw_error_dump = dump_file;
+
+ dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+ dump_file->file_len = cpu_to_le32(file_len);
+ dump_data = (void *)dump_file->data;
+ dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+ dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
+
+ /*
+	 * No need for lock since at this stage the FW isn't loaded. So it
+ * can't assert - we are the only one who can possibly be accessing
+ * mvm->fw_error_sram right now.
+ */
+ memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+}
+#endif
+
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
- if (!mvm->restart_fw)
- iwl_mvm_dump_sram(mvm);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_fw_error_sram_dump(mvm);
+#endif
iwl_mvm_nic_restart(mvm);
}
@@ -778,6 +866,323 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm);
}
+struct iwl_d0i3_iter_data {
+ struct iwl_mvm *mvm;
+ u8 ap_sta_id;
+ u8 vif_count;
+ u8 offloading_tid;
+ bool disable_offloading;
+};
+
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 available_tids = 0;
+ u8 tid;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+ mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ return false;
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ return false;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ /*
+ * in case of pending tx packets, don't use this tid
+ * for offloading in order to prevent reuse of the same
+ * qos seq counters.
+ */
+ if (iwl_mvm_tid_queued(tid_data))
+ continue;
+
+ if (tid_data->state != IWL_AGG_OFF)
+ continue;
+
+ available_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ /*
+ * disallow protocol offloading if we have no available tid
+ * (with no pending frames and no active aggregation,
+ * as we don't handle "holes" properly - the scheduler needs the
+ * frame's seq number and TFD index to match)
+ */
+ if (!available_tids)
+ return true;
+
+ /* for simplicity, just use the first available tid */
+ iter_data->offloading_tid = ffs(available_tids) - 1;
+ return false;
+}
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d0i3_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+ IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ /*
+ * in case of pending tx packets or active aggregations,
+ * avoid offloading features in order to prevent reuse of
+ * the same qos seq counters.
+ */
+ if (iwl_mvm_disallow_offloading(mvm, vif, data))
+ data->disable_offloading = true;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+ iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
+
+ /*
+ * on init/association, mvm already configures POWER_TABLE_CMD
+ * and REPLY_MCAST_FILTER_CMD, so currently don't
+ * reconfigure them (we might want to use different
+ * params later on, though).
+ */
+ data->ap_sta_id = mvmvif->ap_sta_id;
+ data->vif_count++;
+}
+
+static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
+ struct iwl_wowlan_config_cmd_v3 *cmd,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvm_ap_sta;
+
+ if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+ return;
+
+ rcu_read_lock();
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ goto out;
+
+ mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+ cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->offloading_tid = iter_data->offloading_tid;
+
+ /*
+ * The d0i3 uCode takes care of the nonqos counters,
+ * so configure only the qos seq ones.
+ */
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
+out:
+ rcu_read_unlock();
+}
+static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+ int ret;
+ struct iwl_d0i3_iter_data d0i3_iter_data = {
+ .mvm = mvm,
+ };
+ struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
+ .common = {
+ .wakeup_filter =
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+ },
+ };
+ struct iwl_d3_manager_config d3_cfg_cmd = {
+ .min_sleep_time = cpu_to_le32(1000),
+ };
+
+ IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+ /* make sure we have no running tx while configuring the qos */
+ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ synchronize_net();
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_enter_d0i3_iterator,
+ &d0i3_iter_data);
+ if (d0i3_iter_data.vif_count == 1) {
+ mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+ } else {
+ WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_offloading = false;
+ }
+
+ iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+ flags | CMD_MAKE_TRANS_IDLE,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+ IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
+ mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ ieee80211_connection_loss(vif);
+}
+
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ int i;
+ bool wake_queues = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->d0i3_tx_lock);
+
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+ goto out;
+
+ IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+ /* get the sta in order to update seq numbers and re-enqueue skbs */
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (IS_ERR_OR_NULL(sta)) {
+ sta = NULL;
+ goto out;
+ }
+
+ if (mvm->d0i3_offloading && qos_seq) {
+ /* update qos seq numbers if offloading was enabled */
+ mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = le16_to_cpu(qos_seq[i]);
+ /* firmware stores last-used one, we store next one */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+ }
+out:
+ /* re-enqueue (or drop) all packets */
+ while (!skb_queue_empty(&mvm->d0i3_tx)) {
+ struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+ if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+
+ /* if the skb_queue is not empty, we need to wake queues */
+ wake_queues = true;
+ }
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ if (wake_queues)
+ ieee80211_wake_queues(mvm->hw);
+
+ spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+ struct iwl_host_cmd get_status_cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status_v6 *status;
+ int ret;
+ u32 disconnection_reasons, wakeup_reasons;
+ __le16 *qos_seq = NULL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+ if (ret)
+ goto out;
+
+ if (!get_status_cmd.resp_pkt)
+ goto out;
+
+ status = (void *)get_status_cmd.resp_pkt->data;
+ wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+ qos_seq = status->qos_seq_ctr;
+
+ IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+ disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & disconnection_reasons)
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d0i3_disconnect_iter, mvm);
+
+ iwl_free_resp(&get_status_cmd);
+out:
+ iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
+ int ret;
+
+ IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (ret)
+ goto out;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_exit_d0i3_iterator,
+ mvm);
+out:
+ schedule_work(&mvm->d0i3_exit_work);
+ return ret;
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -789,4 +1194,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_error = iwl_mvm_nic_error,
.cmd_queue_full = iwl_mvm_cmd_queue_full,
.nic_config = iwl_mvm_nic_config,
+ .enter_d0i3 = iwl_mvm_enter_d0i3,
+ .exit_d0i3 = iwl_mvm_exit_d0i3,
};
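calc_min_backoff() added in ops.c above walks the per-platform power/backoff table until it finds the first entry whose power threshold the device's default power limit meets or exceeds. A standalone sketch of that lookup (the struct and function names here are illustrative; the driver's real table type is struct iwl_pwr_tx_backoff):

/* illustrative table entry; pwr == 0 terminates the table */
struct pwr_backoff_entry {
	unsigned int pwr;	/* power threshold */
	unsigned int backoff;	/* TX backoff to use at or above the threshold */
};

static unsigned int lookup_tx_backoff(const struct pwr_backoff_entry *tbl,
				      unsigned int dflt_pwr_limit)
{
	if (!tbl)
		return 0;

	for (; tbl->pwr; tbl++)
		if (dflt_pwr_limit >= tbl->pwr)
			return tbl->backoff;

	return 0;	/* no entry matched: no TX backoff */
}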
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b7268c0b3333..237efe0ac1c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,13 +156,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
- cmd->rxchain_info = cpu_to_le32(iwl_fw_valid_rx_ant(mvm->fw) <<
+ cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
- cmd->txchain_info = cpu_to_le32(iwl_fw_valid_tx_ant(mvm->fw));
+ cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d9eab3b7bb9f..6b636eab3339 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -74,39 +74,36 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 flags)
{
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
- sizeof(struct iwl_beacon_filter_cmd), cmd);
-
- if (!ret) {
- IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- le32_to_cpu(cmd->ba_enable_beacon_abort));
- IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- le32_to_cpu(cmd->ba_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- le32_to_cpu(cmd->bf_debug_flag));
- IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- le32_to_cpu(cmd->bf_enable_beacon_filter));
- IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- le32_to_cpu(cmd->bf_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_roaming_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- le32_to_cpu(cmd->bf_roaming_state));
- IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
- le32_to_cpu(cmd->bf_temp_threshold));
- IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_fast_filter));
- IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_slow_filter));
- }
- return ret;
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
}
static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- mvm->ps_prevented)
+ if (mvm->ps_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps || mvmvif->pm_prevented)
+ if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
+ mvm->pm_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,72 +416,44 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- int ret;
- bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mac_power_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color));
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
}
-static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- force_disable)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (mvm->ps_disabled)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
&cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
-{
- return _iwl_mvm_power_update_device(mvm, false);
-}
-
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,176 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_power_constraint {
+ struct ieee80211_vif *bf_vif;
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_vif *p2p_vif;
+ u16 bss_phyctx_id;
+ u16 p2p_phyctx_id;
+ bool pm_disabled;
+ bool ps_disabled;
+ struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_power_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = _data;
- int ret;
+ struct iwl_power_constraint *power_iterator = _data;
+ struct iwl_mvm *mvm = power_iterator->mvm;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* no BSS power mgmt if we have an active AP */
+ if (mvmvif->ap_ibss_active)
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* no BSS power mgmt and no device power save */
+ power_iterator->pm_disabled = true;
+ power_iterator->ps_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (mvmvif->phy_ctxt)
+ power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
+
+ /* we should have only one P2P vif */
+ WARN_ON(power_iterator->p2p_vif);
+ power_iterator->p2p_vif = vif;
+
+ IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
+ power_iterator->p2p_phyctx_id,
+ power_iterator->bss_phyctx_id);
+ if (!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+			/* no BSS power mgmt if we have a P2P client */
+ power_iterator->pm_disabled = true;
+ } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+ power_iterator->bss_phyctx_id < MAX_PHYS &&
+ power_iterator->p2p_phyctx_id ==
+ power_iterator->bss_phyctx_id) {
+ power_iterator->pm_disabled = true;
+ }
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ if (mvmvif->phy_ctxt)
+ power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
+
+ /* we should have only one BSS vif */
+ WARN_ON(power_iterator->bss_vif);
+ power_iterator->bss_vif = vif;
+
+ if (mvmvif->bf_data.bf_enabled &&
+ !WARN_ON(power_iterator->bf_vif))
+ power_iterator->bf_vif = vif;
+
+ IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
+ power_iterator->p2p_phyctx_id,
+ power_iterator->bss_phyctx_id);
+ if (mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
+ (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+ power_iterator->bss_phyctx_id < MAX_PHYS &&
+ power_iterator->p2p_phyctx_id ==
+ power_iterator->bss_phyctx_id))
+ power_iterator->pm_disabled = true;
+ break;
+
+ default:
+ break;
+ }
+}
- mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+static void
+iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
+ struct iwl_power_constraint *constraint)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ constraint->pm_disabled = true;
+ constraint->ps_disabled = true;
+ }
- ret = iwl_mvm_power_mac_update_mode(mvm, vif);
- WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_iterator, constraint);
}
-static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_constraint constraint = {
+ .p2p_phyctx_id = MAX_PHYS,
+ .bss_phyctx_id = MAX_PHYS,
+ .mvm = mvm,
+ };
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
+ iwl_mvm_power_get_global_constraint(mvm, &constraint);
+ mvm->ps_disabled = constraint.ps_disabled;
+ mvm->pm_disabled = constraint.pm_disabled;
+
+ /* don't update device power state unless we add / remove monitor */
if (vif->type == NL80211_IFTYPE_MONITOR) {
- int ret = _iwl_mvm_power_update_device(mvm, assign);
- mvm->ps_prevented = assign;
- WARN_ONCE(ret, "Failed to update power device state\n");
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
}
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_binding_iterator,
- mvm);
+ if (constraint.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (constraint.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!constraint.bf_vif)
+ return 0;
+
+ vif = constraint.bf_vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+ !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (WARN_ON(!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +781,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
}
#endif
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 cmd_flags,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- };
int ret;
if (mvmvif != mvm->bf_allowed_vif ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ if (!d0i3)
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
- if (!ret)
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = true;
return ret;
}
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +830,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +838,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags)
{
+ int ret;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
- if (!mvmvif->bf_data.bf_enabled)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- return iwl_mvm_enable_beacon_filter(mvm, vif);
-}
+ if (!vif->bss_conf.assoc)
+ return 0;
-const struct iwl_mvm_power_ops pm_mac_ops = {
- .power_update_mode = iwl_mvm_power_mac_update_mode,
- .power_update_device_mode = iwl_mvm_power_update_device,
- .power_disable = iwl_mvm_power_mac_disable,
- .power_update_binding = _iwl_mvm_power_update_binding,
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (enable) {
+ /* configure skip over dtim up to 300 msec */
+ int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ if (WARN_ON(!dtimper_msec))
+ return 0;
+
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd.skip_dtim_periods = 300 / dtimper_msec;
+ }
+ iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+ memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
-};
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* configure beacon filtering */
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (enable) {
+ struct iwl_beacon_filter_cmd cmd_bf = {
+ IWL_BF_CMD_CONFIG_D0I3,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
+ } else {
+ if (mvmvif->bf_data.bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (!mvmvif->bf_data.bf_enabled) {
+ /* disable beacon filtering explicitly if force is true */
+ if (force)
+ return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return 0;
+ }
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+}
+
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
+{
+ struct iwl_powertable_cmd cmd = {
+ .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
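
For reference, the d0i3 power path above budgets roughly 300 ms of skipped DTIMs by dividing that budget by the DTIM period in milliseconds (DTIM count times the beacon interval, with one TU treated as about one millisecond). A minimal standalone sketch of that arithmetic, using hypothetical values rather than the driver's structures:

#include <stdio.h>

/* Sketch only: how many DTIM periods fit into a ~300 ms skip budget.
 * dtim_period is the DTIM count, beacon_int_tu the beacon interval in
 * TUs (the driver treats a TU as roughly 1 ms for this calculation).
 */
static unsigned int skip_dtim_periods(unsigned int dtim_period,
                                      unsigned int beacon_int_tu)
{
        unsigned int dtim_msec = dtim_period * beacon_int_tu;

        return dtim_msec ? 300 / dtim_msec : 0;
}

int main(void)
{
        /* typical AP: 100 TU beacon interval */
        printf("DTIM=1 -> skip %u periods\n", skip_dtim_periods(1, 100)); /* 3 */
        printf("DTIM=3 -> skip %u periods\n", skip_dtim_periods(3, 100)); /* 1 */
        return 0;
}
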
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644
index ef712ae5bc62..000000000000
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-debug.h"
-#include "mvm.h"
-#include "iwl-modparams.h"
-#include "fw-api-power.h"
-
-#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-
-static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
- le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
- }
-}
-
-static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
-{
- struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_channel *chan;
- int dtimper, dtimper_msec;
- int keep_alive;
- bool radar_detect = false;
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- /*
- * Regardless of power management state the driver must set
- * keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
- */
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
-
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- if (!vif->bss_conf.ps)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
- if (vif->bss_conf.beacon_rate &&
- (vif->bss_conf.beacon_rate->bitrate == 10 ||
- vif->bss_conf.beacon_rate->bitrate == 60)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
- }
-
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
- /* Check if radar detection is required on current channel */
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- WARN_ON(!chanctx_conf);
- if (chanctx_conf) {
- chan = chanctx_conf->def.chan;
- radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
- }
- rcu_read_unlock();
-
- /* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
- (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
- mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
- }
-
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- } else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
- if (mvmvif->dbgfs_pm.skip_over_dtim)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- else
- cmd->flags &=
- cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
- cmd->rx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
- cmd->tx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
- if (mvmvif->dbgfs_pm.lprx_ena)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- else
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-}
-
-static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- int ret;
- bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
- return 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_powertable_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- iwl_mvm_power_log(mvm, &cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
- sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
-{
- struct iwl_powertable_cmd cmd = {};
- int pos = 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
- return pos;
-}
-#endif
-
-const struct iwl_mvm_power_ops pm_legacy_ops = {
- .power_update_mode = iwl_mvm_power_legacy_update_mode,
- .power_disable = iwl_mvm_power_legacy_disable,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
-#endif
-};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index ce5db6c4ef7e..35e86e06dffd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -65,9 +65,14 @@
#include "fw-api.h"
#include "mvm.h"
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+ int n_low_latency_bindings;
struct ieee80211_vif *new_vif;
};
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (vif->bss_conf.assoc)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
if (mvmvif->ap_ibss_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_MONITOR:
if (mvmvif->monitor_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_P2P_DEVICE:
- break;
+ return;
default:
WARN_ON_ONCE(1);
- break;
+ return;
+ }
+
+ data->n_interfaces[id]++;
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
}
}
@@ -162,7 +174,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
- int i, idx, ret, num_active_macs, quota, quota_rem;
+ int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
@@ -197,11 +209,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_macs += data.n_interfaces[i];
}
- quota = 0;
- quota_rem = 0;
- if (num_active_macs) {
- quota = IWL_MVM_MAX_QUOTA / num_active_macs;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding when there are
+ * several data bindings but only a single low latency one.
+ * Split the rest of the quota equally among the other data
+ * interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ } else if (num_active_macs) {
+ /*
+ * There are either no low latency bindings or more than one,
+ * or all the data interfaces belong to the single low latency
+ * binding. Split the quota equally among the data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +251,37 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
- if (data.n_interfaces[i] <= 0) {
+ if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- } else {
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of them is
+ * low latency. In this case, allocate the minimum required
+ * quota to the low latency binding.
+ */
+ cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+ else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- }
+
+ WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+
+ cmd.quotas[idx].max_duration = cpu_to_le32(0);
+
idx++;
}
- /* Give the remainder of the session to the first binding */
- le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
+ le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+ break;
+ }
+ }
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
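
To make the quota arithmetic above easier to follow, here is a self-contained sketch of the split in user-space C. The QUOTA_100 value of 128 and the 25% low-latency minimum are assumptions for illustration; the real values come from IWL_MVM_MAX_QUOTA and IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT and may differ.

#include <stdio.h>

#define MAX_BINDINGS     4
#define QUOTA_100        128                      /* assumed IWL_MVM_MAX_QUOTA */
#define QUOTA_LOWLAT_MIN ((QUOTA_100 * 25) / 100) /* assumed 25% minimum */

/* Sketch of the split above: with exactly one low latency binding and
 * other active data interfaces, that binding gets the reserved minimum
 * and the rest of the airtime is shared evenly; otherwise everything is
 * shared evenly. The rounding remainder goes to the first binding that
 * actually received quota.
 */
static void split_quota(const int n_ifs[MAX_BINDINGS],
                        const int low_lat[MAX_BINDINGS],
                        int quotas[MAX_BINDINGS])
{
        int i, num_active = 0, n_low_lat = 0, n_non_lowlat, quota, rem;

        for (i = 0; i < MAX_BINDINGS; i++) {
                num_active += n_ifs[i];
                if (n_ifs[i] && low_lat[i])
                        n_low_lat++;
        }

        n_non_lowlat = num_active;
        if (n_low_lat == 1)
                for (i = 0; i < MAX_BINDINGS; i++)
                        if (low_lat[i])
                                n_non_lowlat -= n_ifs[i];

        if (n_low_lat == 1 && n_non_lowlat) {
                quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
                rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
        } else if (num_active) {
                quota = QUOTA_100 / num_active;
                rem = QUOTA_100 % num_active;
        } else {
                quota = rem = 0;
        }

        for (i = 0; i < MAX_BINDINGS; i++) {
                if (!n_ifs[i])
                        quotas[i] = 0;
                else if (n_low_lat == 1 && n_non_lowlat && low_lat[i])
                        quotas[i] = QUOTA_LOWLAT_MIN;
                else
                        quotas[i] = quota * n_ifs[i];
        }

        /* hand the rounding remainder to the first binding with quota */
        for (i = 0; i < MAX_BINDINGS; i++)
                if (quotas[i]) {
                        quotas[i] += rem;
                        break;
                }
}

int main(void)
{
        int n_ifs[MAX_BINDINGS]   = { 1, 2, 0, 0 }; /* binding 0 is low latency */
        int low_lat[MAX_BINDINGS] = { 1, 0, 0, 0 };
        int quotas[MAX_BINDINGS], i;

        split_quota(n_ifs, low_lat, quotas);
        for (i = 0; i < MAX_BINDINGS; i++)
                printf("binding %d: quota %d\n", i, quotas[i]);
        return 0;
}
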
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6abf74e1351f..568abd61b14f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -166,7 +166,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return false;
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+ if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
return false;
if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
.next_columns = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_LEGACY_ANT_B] = {
@@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_B,
.next_columns = {
RS_COLUMN_LEGACY_ANT_A,
+ RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_SISO_ANT_A] = {
@@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
@@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
@@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
@@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
@@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_AB,
.next_columns = {
RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.sgi = true,
.next_columns = {
RS_COLUMN_SISO_ANT_A_SGI,
+ RS_COLUMN_SISO_ANT_B_SGI,
+ RS_COLUMN_SISO_ANT_A,
+ RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
- RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
/* Expected TpT tables. 4 indexes:
* 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
*/
-static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
{0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};
-static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
{0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
{0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};
-static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
{0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -503,6 +503,14 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->average_tpt = IWL_INVALID_VALUE;
}
+static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
+{
+ int i;
+
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ rs_rate_scale_clear_window(&tbl->win[i]);
+}
+
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
return (ant_type & valid_antenna) == ant_type;
@@ -566,19 +574,13 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
+static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ struct iwl_rate_scale_data *window)
{
- struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
s32 fail_count, tpt;
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
/* Get expected throughput */
tpt = get_expected_tpt(tbl, scale_index);
@@ -636,6 +638,21 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ window);
+}
+
/* Convert rs_rate object into ucode rate bitmask */
static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
struct rs_rate *rate)
@@ -905,7 +922,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rate->bw = RATE_MCS_CHAN_WIDTH_20;
- WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
rate->index > IWL_RATE_MCS_9_INDEX);
rate->index = rs_ht_to_legacy[rate->index];
@@ -917,7 +934,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
if (num_of_ant(rate->ant) > 1)
- rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ rate->ant = first_antenna(mvm->fw->valid_tx_ant);
/* Relevant in both switching to SISO or Legacy */
rate->sgi = false;
@@ -1169,12 +1186,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
-static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column,
u32 bw)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
column->mode != RS_SISO &&
@@ -1262,9 +1279,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -1362,7 +1378,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
struct iwl_scale_tbl_info *tbl;
- int i;
int active_tbl;
int flush_interval_passed = 0;
struct iwl_mvm *mvm;
@@ -1423,9 +1438,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
IWL_DEBUG_RATE(mvm,
"LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(
- &(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
}
}
@@ -1434,8 +1447,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* "search" table). */
if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
}
}
}
@@ -1478,8 +1490,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
- u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
- s32 *expected_tpt_tbl;
+ u8 valid_ants = mvm->fw->valid_tx_ant;
+ const u16 *expected_tpt_tbl;
s32 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -1725,7 +1737,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
int index;
- int i;
struct iwl_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
@@ -2010,8 +2021,7 @@ lq_update:
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&(tbl->win[i]));
+ rs_rate_scale_clear_tbl_windows(tbl);
/* Use new "search" start rate */
index = tbl->rate.index;
@@ -2090,7 +2100,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
i = lq_sta->last_txrate_idx;
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+ valid_tx_ant = mvm->fw->valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2241,6 +2251,73 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
}
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats)
+{
+ spin_lock_bh(&mvm->drv_stats_lock);
+ memset(stats, 0, sizeof(*stats));
+ spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+ struct iwl_mvm_frame_stats *stats,
+ u32 rate, bool agg)
+{
+ u8 nss = 0, mcs = 0;
+
+ spin_lock(&mvm->drv_stats_lock);
+
+ if (agg)
+ stats->agg_frames++;
+
+ stats->success_frames++;
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ stats->bw_20_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ stats->bw_40_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ stats->bw_80_frames++;
+ break;
+ default:
+ WARN_ONCE(1, "bad BW. rate 0x%x", rate);
+ }
+
+ if (rate & RATE_MCS_HT_MSK) {
+ stats->ht_frames++;
+ mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
+ } else if (rate & RATE_MCS_VHT_MSK) {
+ stats->vht_frames++;
+ mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+ nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ } else {
+ stats->legacy_frames++;
+ }
+
+ if (nss == 1)
+ stats->siso_frames++;
+ else if (nss == 2)
+ stats->mimo2_frames++;
+
+ if (rate & RATE_MCS_SGI_MSK)
+ stats->sgi_frames++;
+ else
+ stats->ngi_frames++;
+
+ stats->last_rates[stats->last_frame_idx] = rate;
+ stats->last_frame_idx = (stats->last_frame_idx + 1) %
+ ARRAY_SIZE(stats->last_rates);
+
+ spin_unlock(&mvm->drv_stats_lock);
+}
+#endif
+
/*
* Called after adding a new station to initialize rate scaling
*/
@@ -2265,8 +2342,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = sta_priv->sta_id;
for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+ rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
lq_sta->flush_timer = 0;
@@ -2320,7 +2396,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
- first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+ first_antenna(mvm->fw->valid_tx_ant);
lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
@@ -2335,7 +2411,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
-
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
+#endif
rs_initialize_lq(mvm, sta, lq_sta, band, init);
}
@@ -2446,7 +2524,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
memcpy(&rate, initial_rate, sizeof(rate));
- valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+ valid_tx_ant = mvm->fw->valid_tx_ant;
if (is_siso(&rate)) {
num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2523,7 +2601,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
if (sta)
lq_cmd->agg_time_limit =
- cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
+ cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2547,7 +2625,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, const u32 rate)
{
char *type, *bw;
@@ -2596,7 +2674,7 @@ static int rs_pretty_print_rate(char *buf, const u32 rate)
return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
- (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
(rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "",
(rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
@@ -2677,9 +2755,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_A) ? "ANT_A," : "",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
- (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
+ (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(rate)) ? "legacy" :
is_vht(rate) ? "VHT" : "HT");
@@ -2815,8 +2893,8 @@ static void rs_rate_init_stub(void *mvm_r,
struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static struct rate_control_ops rs_mvm_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_mvm_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
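
The new debugfs frame statistics above keep, alongside the per-bandwidth and per-modulation counters, a small ring of the most recent rate words. A minimal sketch of that ring, with an assumed size of 16 entries (the driver sizes it with ARRAY_SIZE(stats->last_rates)):

#include <stdio.h>
#include <stdint.h>

#define LAST_RATES 16 /* assumed ring size for the sketch */

struct frame_stats {
        uint32_t      last_rates[LAST_RATES];
        unsigned int  last_frame_idx;
        unsigned long success_frames;
};

/* Each received frame's rate word overwrites the oldest slot, so a
 * debugfs dump always shows the LAST_RATES most recent entries.
 */
static void record_frame(struct frame_stats *s, uint32_t rate)
{
        s->success_frames++;
        s->last_rates[s->last_frame_idx] = rate;
        s->last_frame_idx = (s->last_frame_idx + 1) % LAST_RATES;
}

int main(void)
{
        struct frame_stats stats = { { 0 }, 0, 0 };
        uint32_t i;

        for (i = 0; i < 20; i++)
                record_frame(&stats, 0x100 + i);

        /* the ring now holds rates 0x104..0x113; writing resumes at slot 4 */
        printf("next slot %u after %lu frames\n",
               stats.last_frame_idx, stats.success_frames);
        return 0;
}
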
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 7bc6404f6986..3332b396011e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -277,7 +277,7 @@ enum rs_column {
struct iwl_scale_tbl_info {
struct rs_rate rate;
enum rs_column column;
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index a85b60f7e67e..6061553a5e44 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -77,6 +77,15 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ spin_lock(&mvm->drv_stats_lock);
+ mvm->drv_rx_stats.ampdu_count++;
+ spin_unlock(&mvm->drv_stats_lock);
+ }
+#endif
+
return 0;
}
@@ -129,22 +138,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- int rssi_all_band_a, rssi_all_band_b;
- u32 agc_a, agc_b, max_agc;
+ u32 agc_a, agc_b;
u32 val;
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
- max_agc = max_t(u32, agc_a, agc_b);
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
- rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_A_POS;
- rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
@@ -364,31 +367,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.flag |= RX_FLAG_80MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.flag |= RX_FLAG_160MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status.flag |= RX_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status.flag |= RX_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
rx_status.flag |= RX_FLAG_HT;
rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
rx_status.vht_nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT;
+ rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
} else {
rx_status.rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status.band);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
+ rx_status.flag & RX_FLAG_AMPDU_DETAILS);
+#endif
iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
rxb, &rx_status);
return 0;
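
The RSSI path above now derives the reported signal purely from the per-chain in-band energy and AGC readings, following the "dBm = rssi dB - agc dB - constant" comment, and the unused all-band values are dropped. A rough standalone sketch of that combine; the offset constant here is an assumption for illustration, not the driver's actual value:

#include <stdio.h>

#define RSSI_OFFSET 44 /* assumed constant, for illustration only */

/* Per-chain conversion followed by picking the stronger chain. */
static int chain_dbm(int rssi_db, int agc_db)
{
        return rssi_db - agc_db - RSSI_OFFSET;
}

static int max_rssi_dbm(int rssi_a, int agc_a, int rssi_b, int agc_b)
{
        int a = chain_dbm(rssi_a, agc_a);
        int b = chain_dbm(rssi_b, agc_b);

        return a > b ? a : b;
}

int main(void)
{
        /* chain A reads a bit hotter than chain B at the same gain */
        printf("reported RSSI: %d dBm\n", max_rssi_dbm(40, 25, 35, 25));
        return 0;
}
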
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 742afc429c94..c91dc8498852 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -70,9 +70,16 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
-#define LONG_OUT_TIME_PERIOD 600
-#define SHORT_OUT_TIME_PERIOD 200
-#define SUSPEND_TIME_PERIOD 100
+
+struct iwl_mvm_scan_params {
+ u32 max_out_time;
+ u32 suspend_time;
+ bool passive_fragmented;
+ struct _dwell {
+ u16 passive;
+ u16 active;
+ } dwell[IEEE80211_NUM_BANDS];
+};
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
@@ -82,7 +89,7 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
if (mvm->scan_rx_ant != ANT_NONE)
rx_ant = mvm->scan_rx_ant;
else
- rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+ rx_ant = mvm->fw->valid_rx_ant;
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -90,24 +97,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
- u32 flags, bool is_assoc)
-{
- if (!is_assoc)
- return 0;
- if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
- return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
- return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
-}
-
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
- bool is_assoc)
-{
- if (!is_assoc)
- return 0;
- return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
-}
-
static inline __le32
iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
{
@@ -124,7 +113,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
u32 tx_ant;
mvm->scan_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -181,15 +170,14 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct cfg80211_scan_request *req,
- bool basic_ssid)
+ bool basic_ssid,
+ struct iwl_mvm_scan_params *params)
{
- u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
- u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
- req->n_ssids);
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
int type = BIT(req->n_ssids) - 1;
+ enum ieee80211_band band = req->channels[0]->band;
if (!basic_ssid)
type |= BIT(req->n_ssids);
@@ -199,8 +187,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
- chan->active_dwell = cpu_to_le16(active_dwell);
- chan->passive_dwell = cpu_to_le16(passive_dwell);
+ chan->active_dwell = cpu_to_le16(params->dwell[band].active);
+ chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
chan->iteration_count = cpu_to_le16(1);
chan++;
}
@@ -267,13 +255,76 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return (u16)len;
}
-static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- bool *is_assoc = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *global_bound = data;
+
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ *global_bound = true;
+}
+
+static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int n_ssids,
+ struct iwl_mvm_scan_params *params)
+{
+ bool global_bound = false;
+ enum ieee80211_band band;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_condition_iterator,
+ &global_bound);
+ /*
+ * Under low latency traffic, passive scan is fragmented, i.e. the
+ * dwell on a particular channel is split into fragments. Each
+ * fragment dwells for 20ms and the fragment period is 105ms.
+ * Skipping to the next channel is delayed by the same 105ms period,
+ * so the suspend_time parameter, which covers both the fragment and
+ * the channel-skip periods, is set to 105ms. This value keeps the
+ * overall passive scan duration from growing too long. max_out_time
+ * is set to 70ms, so for active scanning the operating channel is
+ * left for at most 70ms, while for passive it is left only for 20ms
+ * (one fragment dwell).
+ */
+ if (global_bound) {
+ if (!iwl_mvm_low_latency(mvm)) {
+ params->suspend_time = ieee80211_tu_to_usec(100);
+ params->max_out_time = ieee80211_tu_to_usec(600);
+ } else {
+ params->suspend_time = ieee80211_tu_to_usec(105);
+ /* P2P doesn't support fragmented passive scan, so
+ * configure max_out_time to be at least as long as the
+ * longest passive scan dwell time.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+ params->max_out_time = ieee80211_tu_to_usec(70);
+ params->passive_fragmented = true;
+ } else {
+ u32 passive_dwell;
- if (vif->bss_conf.assoc)
- *is_assoc = true;
+ /*
+ * Use the 2.4 GHz (G) band so that the passive channel
+ * dwell time gets its maximum value.
+ */
+ band = IEEE80211_BAND_2GHZ;
+ passive_dwell = iwl_mvm_get_passive_dwell(band);
+ params->max_out_time =
+ ieee80211_tu_to_usec(passive_dwell);
+ }
+ }
+ }
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ if (params->passive_fragmented)
+ params->dwell[band].passive = 20;
+ else
+ params->dwell[band].passive =
+ iwl_mvm_get_passive_dwell(band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ n_ssids);
+ }
}
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
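
A compact sketch of the out-of-channel budget chosen by iwl_mvm_scan_calc_params() above. The 70 and 105 TU fragmented values and the 600/100 TU default follow the code and comment in the patch; the P2P passive dwell figure below is an assumption for illustration.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define TU_TO_USEC(tu) ((uint32_t)(tu) * 1024) /* 1 TU = 1024 usec */

struct scan_params {
        uint32_t max_out_time;  /* max time away from the operating channel */
        uint32_t suspend_time;  /* pause between scan fragments/channels */
        bool     fragmented;
};

/* Sketch of the budget selection: no bound interface -> no limits;
 * bound but not low latency -> long 600/100 TU budget; low latency on
 * a plain station -> fragmented passive scan with a 105 TU period and
 * 70 TU out time; low latency P2P -> out time of one full passive
 * dwell (the 110 TU used here is an assumed dwell value).
 */
static void calc_params(bool bound, bool low_latency, bool sta_not_p2p,
                        struct scan_params *p)
{
        p->max_out_time = 0;
        p->suspend_time = 0;
        p->fragmented = false;

        if (!bound)
                return;

        if (!low_latency) {
                p->suspend_time = TU_TO_USEC(100);
                p->max_out_time = TU_TO_USEC(600);
                return;
        }

        p->suspend_time = TU_TO_USEC(105);
        if (sta_not_p2p) {
                p->max_out_time = TU_TO_USEC(70);
                p->fragmented = true;
        } else {
                p->max_out_time = TU_TO_USEC(110);
        }
}

int main(void)
{
        struct scan_params p;

        calc_params(true, true, true, &p);
        printf("fragmented=%d max_out=%u us suspend=%u us\n",
               p.fragmented, p.max_out_time, p.suspend_time);
        return 0;
}
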
@@ -288,13 +339,13 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
- bool is_assoc = false;
int ret;
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
bool basic_ssid = !(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
+ struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
@@ -304,17 +355,18 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_vif_assoc_iterator,
- &is_assoc);
+
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
- cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
- is_assoc);
- cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+ cmd->max_out_time = cpu_to_le32(params.max_out_time);
+ cmd->suspend_time = cpu_to_le32(params.suspend_time);
+ if (params.passive_fragmented)
+ cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
+
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
@@ -360,7 +412,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
- iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
+ iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
@@ -402,12 +454,17 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ if (mvm->scan_status == IWL_MVM_SCAN_OS)
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
return 0;
}
@@ -464,7 +521,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
};
}
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_scan_abort;
static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
@@ -472,12 +529,13 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
int ret;
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return;
+ return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
- return;
+ return 0;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
@@ -488,18 +546,15 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
- /* mac80211's state will be cleaned in the fw_restart flow */
+ /* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
- ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
- if (ret)
- IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
-
- return;
+ return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+ return ret;
}
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -509,12 +564,18 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ieee80211_sched_scan_stopped(mvm->hw);
+ /* only call mac80211 completion if the stop was initiated by FW */
+ if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
return 0;
}
@@ -545,14 +606,9 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
- struct iwl_scan_offload_cmd *scan)
+ struct iwl_scan_offload_cmd *scan,
+ struct iwl_mvm_scan_params *params)
{
- bool is_assoc = false;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_vif_assoc_iterator,
- &is_assoc);
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -560,13 +616,17 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
- scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
- is_assoc);
- scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+ scan->max_out_time = cpu_to_le32(params->max_out_time);
+ scan->suspend_time = cpu_to_le32(params->suspend_time);
+
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
scan->rep_count = cpu_to_le32(1);
+
+ if (params->passive_fragmented)
+ scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -596,6 +656,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* config match list.
*/
for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ /* skip empty SSID matchsets */
+ if (!req->match_sets[i].ssid.ssid_len)
+ continue;
scan->direct_scan[i].id = WLAN_EID_SSID;
scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
@@ -628,12 +691,11 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
int *head, int *tail,
- u32 ssid_bitmap)
+ u32 ssid_bitmap,
+ struct iwl_mvm_scan_params *params)
{
struct ieee80211_supported_band *s_band;
- int n_probes = req->n_ssids;
int n_channels = req->n_channels;
- u8 active_dwell, passive_dwell;
int i, j, index = 0;
bool partial;
@@ -643,8 +705,6 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
* to scan. So add requested channels to head of the list and others to
* the end.
*/
- active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
- passive_dwell = iwl_mvm_get_passive_dwell(band);
s_band = &mvm->nvm_data->bands[band];
for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
@@ -668,8 +728,8 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
channels->channel_number[index] =
cpu_to_le16(ieee80211_frequency_to_channel(
s_band->channels[i].center_freq));
- channels->dwell_time[index][0] = active_dwell;
- channels->dwell_time[index][1] = passive_dwell;
+ channels->dwell_time[index][0] = params->dwell[band].active;
+ channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
@@ -698,7 +758,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
- int supported_bands = 0;
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
@@ -712,22 +771,19 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
.id = SCAN_OFFLOAD_CONFIG_CMD,
.flags = CMD_SYNC,
};
+ struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
- if (band_2ghz)
- supported_bands++;
- if (band_5ghz)
- supported_bands++;
-
cmd_len = sizeof(struct iwl_scan_offload_cfg) +
- supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+ 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
if (!scan_cfg)
return -ENOMEM;
- iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+ iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+ iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
@@ -739,7 +795,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_2GHZ, &head, &tail,
- ssid_bitmap);
+ ssid_bitmap, &params);
}
if (band_5ghz) {
iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
@@ -749,7 +805,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_5GHZ, &head, &tail,
- ssid_bitmap);
+ ssid_bitmap, &params);
}
cmd.data[0] = scan_cfg;
@@ -889,26 +945,49 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
* microcode has notified us that a scan is completed.
*/
IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
- ret = -EIO;
+ ret = -ENOENT;
}
return ret;
}
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
{
int ret;
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
lockdep_assert_held(&mvm->mutex);
if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
- return;
+ return 0;
}
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+
ret = iwl_mvm_send_sched_scan_abort(mvm);
- if (ret)
+ if (ret) {
IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
- else
- IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear the scan status so the next scan requests will succeed. This
+ * also ensures the Rx handler doesn't do anything, as the scan was
+ * stopped from above.
+ */
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
+
+ return 0;
}
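
Both iwl_mvm_cancel_scan() and iwl_mvm_sched_scan_stop() above arm a notification waiter before sending the abort command and only then block for the completion, so the notification cannot slip through between send and wait. A rough user-space model of that ordering using a condition variable; this is not the driver's iwl_notification_wait API, just the idea behind it.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Model of the ordering only: arm the waiter, send the command, then
 * wait with a timeout; the "RX thread" stands in for the firmware's
 * completion notification.
 */
struct waiter {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            done;
};

static void waiter_fire(struct waiter *w)
{
        pthread_mutex_lock(&w->lock);
        w->done = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

static int waiter_wait(struct waiter *w, int seconds)
{
        struct timespec ts;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += seconds;

        pthread_mutex_lock(&w->lock);
        while (!w->done && !err)
                err = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
        pthread_mutex_unlock(&w->lock);
        return w->done ? 0 : -1;
}

static void *rx_thread(void *arg)
{
        struct timespec delay = { 0, 100 * 1000 * 1000 }; /* 100 ms */

        nanosleep(&delay, NULL);
        waiter_fire(arg); /* simulated "scan complete" notification */
        return NULL;
}

int main(void)
{
        struct waiter w = { PTHREAD_MUTEX_INITIALIZER,
                            PTHREAD_COND_INITIALIZER, false };
        pthread_t t;

        /* 1) waiter is armed, 2) the abort "command" would be sent here,
         * 3) wait for the notification with a one second timeout. */
        pthread_create(&t, NULL, rx_thread, &w);
        printf("scan stop %s\n", waiter_wait(&w, 1) ? "timed out" : "completed");
        pthread_join(t, NULL);
        return 0;
}
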
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 3397f59cd4e4..f339ef884250 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,27 +66,27 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
- cmd_v5->add_modify = cmd_v6->add_modify;
- cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v6->sta_id;
- cmd_v5->modify_mask = cmd_v6->modify_mask;
- cmd_v5->station_flags = cmd_v6->station_flags;
- cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v6->assoc_id;
- cmd_v5->beamform_flags = cmd_v6->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+ cmd_v5->add_modify = cmd_v7->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v7->sta_id;
+ cmd_v5->modify_mask = cmd_v7->modify_mask;
+ cmd_v5->station_flags = cmd_v7->station_flags;
+ cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v7->assoc_id;
+ cmd_v5->beamform_flags = cmd_v7->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
}
static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
}
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ struct iwl_mvm_add_sta_cmd_v7 *cmd,
int *status)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
cmd, status);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v6 *cmd)
+ struct iwl_mvm_add_sta_cmd_v7 *cmd)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
sizeof(*cmd), cmd);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
&cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
&sta_cmd);
}
-static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+ enum nl80211_iftype iftype)
{
int sta_id;
+ u32 reserved_ids = 0;
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
+ /* d0i3/d3 assumes the AP's sta_id (of the station vif) is 0. Reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)))
return sta_id;
+ }
return IWL_MVM_STATION_COUNT;
}
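
The reserved-ID logic added above keeps station table slot 0 out of reach of non-station interfaces. A minimal standalone sketch of that scan, with an assumed table size of 16 (the driver uses IWL_MVM_STATION_COUNT):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define STATION_COUNT 16 /* assumed table size for the sketch */

/* Sketch of the reserved-slot scan: slot 0 stays free for the AP
 * station of a managed (station) interface because the d0i3/d3 paths
 * assume that station sits in slot 0; other interface types start
 * allocating from slot 1.
 */
static int find_free_sta_id(const bool used[STATION_COUNT], bool is_station)
{
        uint32_t reserved = is_station ? 0 : 1u << 0;
        int id;

        for (id = 0; id < STATION_COUNT; id++) {
                if ((1u << id) & reserved)
                        continue;
                if (!used[id])
                        return id;
        }
        return STATION_COUNT; /* table full */
}

int main(void)
{
        bool used[STATION_COUNT] = { false };

        printf("station vif -> slot %d\n", find_free_sta_id(used, true));  /* 0 */
        printf("AP vif      -> slot %d\n", find_free_sta_id(used, false)); /* 1 */
        return 0;
}
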
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
else
sta_id = mvm_sta->sta_id;
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
/*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask)
+ u32 qmask, enum nl80211_iftype iftype)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
}
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd;
+ struct iwl_mvm_add_sta_cmd_v7 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Add the aux station, but without any queues */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
- ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret)
return ret;
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static const u8 tid_to_ac[] = {
+const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
IEEE80211_AC_VO,
};
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
@@ -873,10 +902,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
+ spin_lock_bh(&mvmsta->lock);
+
+ /* possible race condition - we entered D0i3 while starting agg */
+ if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+ spin_unlock_bh(&mvmsta->lock);
+ IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+ return -EIO;
+ }
+
/* the new tx queue is still connected to the same mac80211 queue */
- mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
- spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
@@ -916,7 +953,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
- fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1411,7 +1448,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1464,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt)
+ u16 cnt, u16 tids, bool more_data,
+ bool agg)
{
- u16 sleep_state_flags =
- (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
- STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
.sleep_tx_count = cpu_to_le16(cnt),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
- /*
- * Same modify mask for sleep_tx_count and sleep_state_flags so
- * we must set the sleep_state_flags too.
- */
- .sleep_state_flags = cpu_to_le16(sleep_state_flags),
};
- int ret;
+ int tid, ret;
+ unsigned long _tids = tids;
+
+ /* convert TIDs to ACs - we don't support TSPEC so that's OK
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation queues then check if all
+ * the queues combined that we're releasing frames from have
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (agg) {
+ int remaining = cnt;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (WARN(tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
+ "TID %d state is %d\n",
+ tid, tid_data->state)) {
+ spin_unlock_bh(&mvmsta->lock);
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+
+ n_queued = iwl_mvm_tid_queued(tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ } else {
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ }
- /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
+
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ return 0;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+
+ return 0;
+}
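
The aggregation branch of iwl_mvm_sta_modify_sleep_tx_count above boils down to clamping the released-frame count to what is actually queued and flagging more_data when the queues hold more than the service period allows. A standalone sketch of that arithmetic, in plain C; agg_release_count and its parameters are hypothetical, not driver API:

#include <stdbool.h>
#include <stddef.h>

/* Compute how many frames to release (at most 'cnt') given the per-queue
 * backlog of the aggregation queues we are releasing from, and whether
 * more data remains after the service period. */
static unsigned int agg_release_count(const unsigned int *n_queued, size_t n_queues,
                                      unsigned int cnt, bool *more_data)
{
        unsigned int remaining = cnt;
        size_t i;

        *more_data = false;
        for (i = 0; i < n_queues; i++) {
                if (n_queued[i] > remaining) {
                        *more_data = true;   /* more buffered than fits in the SP */
                        remaining = 0;
                        break;
                }
                remaining -= n_queued[i];
        }
        return cnt - remaining;
}

For example, cnt = 4 with queues holding {1, 1} frames yields a count of 2 and more_data = false; queues holding {3, 3} yield 4 and more_data = true.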
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4968d0237dc5..2ed84c421481 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -195,24 +195,33 @@ struct iwl_mvm;
/**
* DOC: AP mode - PS
*
- * When a station is asleep, the fw will set it as "asleep". All the
- * non-aggregation frames to that station will be dropped by the fw
- * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
- * let mac80211 know how many frames we have in these queues so that it can
+ * let mac80211 know when there are frames in these queues so that it can
* properly handle trigger frames.
- * When the a trigger frame is received, mac80211 tells the driver to send
- * frames from the AMPDU queues or AC queue depending on which queue are
- * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
- * all the knowledege since all the non-agg frames are buffered / filtered, and
- * the driver tells mac80211 about agg frames). The driver needs to tell the fw
- * to let frames out even if the station is asleep. This is done by
- * %iwl_mvm_sta_modify_sleep_tx_count.
- * When we receive a frame from that station with PM bit unset, the
- * driver needs to let the fw know that this station isn't alseep any more.
- * This is done by %iwl_mvm_sta_modify_ps_wake.
- *
- * TODO - EOSP handling
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames. The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other cases) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
*/
/**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
u16 ssn;
};
+static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
+{
+ return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ tid_data->next_reclaimed);
+}
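
iwl_mvm_tid_queued above is 12-bit sequence-number arithmetic: the distance from the next SSN to be reclaimed to the next SSN to be used, modulo 4096. A standalone sketch of that arithmetic, in plain C; the helper names are hypothetical, the masks follow the 802.11 sequence-control layout:

#include <stdint.h>

#define SN_MASK 0xfffu                       /* 802.11 sequence numbers are 12 bits */

static uint16_t seq_to_sn(uint16_t seq_ctrl)
{
        /* the SN lives in bits 4..15 of the sequence-control field */
        return (seq_ctrl >> 4) & SN_MASK;
}

static uint16_t tid_queued(uint16_t next_seq_ctrl, uint16_t next_reclaimed_sn)
{
        /* modulo-4096 distance, so wraparound never yields a negative count */
        return (uint16_t)((seq_to_sn(next_seq_ctrl) - next_reclaimed_sn) & SN_MASK);
}

The masking handles wraparound: with next_reclaimed_sn = 0xffe and a next sequence number of 0x001, the queue depth comes out as 3 rather than a negative value.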
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
+ * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
+ bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask);
+ u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta);
int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt);
+ u16 cnt, u16 tids, bool more_data,
+ bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b4c2abaa297b..61331245ad93 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
* in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
/*
* Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
}
} else {
@@ -436,7 +438,8 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.duration = cpu_to_le32(duration);
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
- TE_V2_NOTIF_HOST_EVENT_END);
+ TE_V2_NOTIF_HOST_EVENT_END |
+ T2_V2_START_IMMEDIATELY);
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
@@ -551,7 +554,8 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
time_cmd.repeat = 1;
time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
- TE_V2_NOTIF_HOST_EVENT_END);
+ TE_V2_NOTIF_HOST_EVENT_END |
+ T2_V2_START_IMMEDIATELY);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 3afa6b6bf835..7a99fa361954 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
}
}
-static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
{
struct iwl_host_cmd cmd = {
.id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.flags = CMD_SYNC,
};
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
+ tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 76ee486039d7..879aeac46cc1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -79,6 +79,7 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
+ u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_flags |= TX_CMD_FLG_ACK;
@@ -90,13 +91,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
- /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
- if (info->band == IEEE80211_BAND_2GHZ &&
- (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
- is_multicast_ether_addr(hdr->addr1) ||
- ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
- tx_flags |= TX_CMD_FLG_BT_DIS;
-
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -112,6 +106,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
+ /* tid_tspec will default to 0 = BE when QOS isn't enabled */
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
tx_cmd->pm_frame_timeout = cpu_to_le16(3);
@@ -122,15 +121,12 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
- } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
tx_cmd->pm_frame_timeout = cpu_to_le16(2);
} else {
tx_cmd->pm_frame_timeout = 0;
}
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
-
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
@@ -207,7 +203,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+ iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
mvm->mgmt_last_antenna_idx);
rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -377,6 +373,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
+ /*
+ * we handle that entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
spin_lock(&mvmsta->lock);
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +440,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
+ if ((tid_data->state == IWL_AGG_ON ||
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ iwl_mvm_tid_queued(tid_data) == 0) {
+ /*
+ * Now that this aggregation queue is empty, tell mac80211 so it
+ * knows we no longer have frames buffered for the station on
+ * this TID (for the TIM bitmap calculation).
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
if (tid_data->ssn != tid_data->next_reclaimed)
return;
@@ -680,6 +694,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
} else {
mvmsta = NULL;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 86989df69356..d619851745a1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -289,8 +289,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-static struct {
- char *name;
+static const struct {
+ const char *name;
u8 num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed;
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 line; /* source code line of error */
+ u32 umac_ver; /* umac version */
+} __packed;
+
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_umac_error_event_table table;
+ u32 base;
+
+ base = mvm->umac_error_event_table;
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
+}
+
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000 || base >= 0x80C000) {
+ if (base < 0x800000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -453,29 +511,31 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+ if (mvm->support_umac_log)
+ iwl_mvm_dump_umac_error_log(mvm);
}
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
{
const struct fw_img *img;
- int ofs, len = 0;
- u8 *buf;
+ u32 ofs, sram_len;
+ void *sram;
- if (!mvm->ucode_loaded)
+ if (!mvm->ucode_loaded || mvm->fw_error_sram)
return;
img = &mvm->fw->img[mvm->cur_ucode];
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
- len = img->sec[IWL_UCODE_SECTION_DATA].len;
+ sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
- buf = kzalloc(len, GFP_ATOMIC);
- if (!buf)
+ sram = kzalloc(sram_len, GFP_ATOMIC);
+ if (!sram)
return;
- iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
- iwl_print_hex_error(mvm->trans, buf, len);
-
- kfree(buf);
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
+ mvm->fw_error_sram = sram;
+ mvm->fw_error_sram_len = sram_len;
}
/**
@@ -516,15 +576,20 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
- enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ enum ieee80211_smps_mode smps_mode;
int i;
lockdep_assert_held(&mvm->mutex);
/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
- if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+ if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return;
+ if (vif->type == NL80211_IFTYPE_AP)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +603,44 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmvif->low_latency == value)
+ return 0;
+
+ mvmvif->low_latency = value;
+
+ res = iwl_mvm_update_quotas(mvm, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm, vif);
+}
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *result = _data;
+
+ if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
+ *result = true;
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+ bool result = false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &result);
+
+ return result;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3872ead75488..edb015c99049 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@@ -389,12 +390,92 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_DEBUG_INFO(trans,
+ "Could not retrieve root port ACPI handle");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -419,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
@@ -477,7 +560,7 @@ static int iwl_pci_resume(struct device *device)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e851f26fd44c..9091513ea738 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -304,7 +304,7 @@ struct iwl_trans_pcie {
bool bc_table_dword;
u32 rx_page_order;
- const char **command_names;
+ const char *const *command_names;
/* queue watchdog */
unsigned long wd_timeout;
@@ -488,4 +488,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d497a02..fdfa3969cac9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -155,37 +155,26 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
if (rxq->need_update == 0)
goto exit_unlock;
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- } else {
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
}
}
+
+ rxq->write_actual = round_down(rxq->write, 8);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
rxq->need_update = 0;
exit_unlock:
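
The rewritten iwl_pcie_rxq_inc_wr_ptr collapses the old three-way branch into one decision (wake the NIC only when shadow registers are off and power-save may be holding it asleep) followed by the usual multiple-of-8 write. A standalone sketch of those two pieces, in plain C; the function names are hypothetical:

#include <stdbool.h>
#include <stdint.h>

/* The hardware consumes the RX write pointer in units of 8, so round down. */
static uint32_t rx_write_actual(uint32_t write_ptr)
{
        return write_ptr & ~7u;              /* same as round_down(write_ptr, 8) */
}

/* Wake the NIC explicitly only when shadow registers are unavailable and
 * power-save may be holding it asleep -- the two conditions in the comment. */
static bool need_explicit_wakeup(bool shadow_reg_enable, bool power_save_active)
{
        return !shadow_reg_enable && power_save_active;
}

For example, rx_write_actual(13) is 8, and with shadow registers enabled the wake-up path is skipped entirely.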
@@ -802,10 +791,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta;
- lockdep_assert_held(&trans_pcie->irq_lock);
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
trace_iwlwifi_dev_irq(trans->dev);
@@ -1006,7 +994,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rfkill++;
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b486..dcfd6d866d09 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,20 @@
#include "iwl-agn-hw.h"
#include "internal.h"
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
+}
+
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -89,6 +103,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -223,6 +243,116 @@ out:
return ret;
}
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is possible.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (WARN_ON(ret < 0)) {
+ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ /*
+ * Reset entire device again - do controller reset (results in
+ * SHRD_HW_RST). Turn MAC off before proceeding.
+ */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_set_pwr(trans, false);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,78 +571,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- u32 address;
- int ret = 0;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
- address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ *first_ucode_section = 0;
} else {
shift_param = 16;
- address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ (*first_ucode_section)++;
}
- /* set CPU to started */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_STARTED << shift_param,
- 1);
-
- /* set last complete descriptor number */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
- << shift_param,
- 1);
-
- /* set last loaded block */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
- << shift_param,
- 1);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
/* image loading complete */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_COMPLETED
- << shift_param,
- 1);
-
- /* set FH_TCSR_0_REG */
- iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
- /* verify image verification started */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_TIME_OUT);
- if (ret < 0) {
- IWL_ERR(trans, "secure boot process didn't start\n");
- return ret;
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
}
- /* wait for image verification to complete */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_TIME_OUT);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
}
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
return 0;
}
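
iwl_pcie_load_cpu_sections above splits the firmware image into per-CPU runs of sections, stopping early at an empty section or at the 0xFFFFCCCC CPU1/CPU2 separator and leaving the cursor there for the next CPU. A simplified standalone sketch of that walk, in plain C; count_cpu_sections is hypothetical, and an offset of 0 stands in for the driver's NULL-data check:

#include <stddef.h>
#include <stdint.h>

#define SEPARATOR 0xFFFFCCCCu                /* mirrors CPU1_CPU2_SEPARATOR_SECTION */

/* Count the sections belonging to the current CPU, starting at *cursor and
 * stopping at an empty entry or at the CPU1/CPU2 separator.  *cursor is left
 * on the entry that stopped the walk, so the caller advances past it before
 * loading the next CPU's sections. */
static size_t count_cpu_sections(const uint32_t *offsets, size_t n, size_t *cursor)
{
        size_t count = 0;
        size_t i;

        for (i = *cursor; i < n; i++) {
                if (offsets[i] == 0 || offsets[i] == SEPARATOR)
                        break;
                count++;
        }
        *cursor = i;
        return count;
}

Loading is then: walk CPU1's run, advance the cursor past the separator, and walk CPU2's run, which matches how first_ucode_section is carried between the two calls.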
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
- int i, ret = 0;
+ int ret = 0;
+ int first_ucode_section;
IWL_DEBUG_FW(trans,
"working with %s image\n",
@@ -518,53 +682,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* configure the ucode to be ready to get the secured image */
if (image->is_secure) {
/* set secure boot inspector addresses */
- iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
- iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
- /* release CPU1 reset if secure inspector image burned in OTP */
- iwl_write32(trans, CSR_RESET, 0);
- }
-
- /* load to FW the binary sections of CPU1 */
- IWL_DEBUG_INFO(trans, "Loading CPU1\n");
- for (i = 0;
- i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- }
- /* configure the ucode to start secure process on CPU1 */
- if (image->is_secure) {
- /* config CPU1 to start secure protocol */
- ret = iwl_pcie_secure_set(trans, 1);
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- } else {
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
}
if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
/* load to FW the binary sections of CPU2 */
- IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
- for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
- }
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
- if (image->is_secure) {
- /* set CPU2 for secure protocol */
- ret = iwl_pcie_secure_set(trans, 2);
- if (ret)
- return ret;
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
}
}
@@ -591,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -706,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
else
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+ iwl_trans_pcie_stop_device(trans);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -815,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
@@ -1158,6 +1343,7 @@ static const char *get_csr_string(int cmd)
IWL_CMD(CSR_GIO_CHICKEN_BITS);
IWL_CMD(CSR_ANA_PLL_CFG);
IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
IWL_CMD(CSR_DBG_HPET_MEM_REG);
default:
return "UNKNOWN";
@@ -1190,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
CSR_DRAM_INT_TBL_REG,
CSR_GIO_CHICKEN_BITS,
CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
CSR_HW_REV_WA_REG,
CSR_DBG_HPET_MEM_REG
};
@@ -1407,16 +1594,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3d549008b3e2..3b0c72c10054 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_trans_fw_error(trans);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
/*
@@ -296,43 +296,38 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
if (txq->need_update == 0)
return;
- if (trans->cfg->base_params->shadow_reg_enable ||
- txq_id == trans_pcie->cmd_queue) {
- /* shadow register enabled */
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- } else {
- /* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
- txq->q.write_ptr);
-
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. NIC is woken up for CMD regardless of shadow outside this function
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ txq_id != trans_pcie->cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
*/
- } else
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
}
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
txq->need_update = 0;
}
@@ -705,8 +700,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -1028,7 +1024,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_trans_fw_error(trans);
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
}
@@ -1587,6 +1583,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
iwl_trans_fw_error(trans);
goto cancel;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index cb6d189bc3e6..54e344aed6e0 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
+ GFP_KERNEL);
/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
priv->connect_status = LBS_CONNECTED;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 58c6ee5de98f..33ceda296c9c 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -498,7 +498,7 @@ static int if_sdio_prog_helper(struct if_sdio_card *card,
*/
mdelay(2);
- chunk_size = min(size, (size_t)60);
+ chunk_size = min_t(size_t, size, 60);
*((__le32*)chunk_buffer) = cpu_to_le32(chunk_size);
memcpy(chunk_buffer + 4, firmware, chunk_size);
@@ -639,7 +639,7 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
req_size = size;
while (req_size) {
- chunk_size = min(req_size, (size_t)512);
+ chunk_size = min_t(size_t, req_size, 512);
memcpy(chunk_buffer, firmware, chunk_size);
/*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 69d4c3179d04..9d7a52f5a410 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -57,6 +57,10 @@ static bool rctbl = false;
module_param(rctbl, bool, 0444);
MODULE_PARM_DESC(rctbl, "Handle rate control table");
+static bool support_p2p_device = true;
+module_param(support_p2p_device, bool, 0444);
+MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+ /* must be last, see hwsim_if_comb */
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
};
static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -345,6 +350,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
static const struct ieee80211_iface_combination hwsim_if_comb[] = {
{
.limits = hwsim_if_limits,
+ /* remove the last entry which is P2P_DEVICE */
+ .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
+ {
+ .limits = hwsim_if_limits,
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
@@ -385,6 +411,7 @@ struct mac80211_hwsim_data {
struct mac_address addresses[2];
int channels, idx;
+ bool use_chanctx;
struct ieee80211_channel *tmp_chan;
struct delayed_work roc_done;
@@ -451,7 +478,7 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
-static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +495,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1063,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
ack = true;
rx_status.mactime = now + data2->tsf_offset;
-#if 0
- /*
- * Don't enable this code by default as the OUI 00:00:00
- * is registered to Xerox so we shouldn't use it here, it
- * might find its way into pcap files.
- * Note that this code requires the headroom in the SKB
- * that was allocated earlier.
- */
- rx_status.vendor_radiotap_oui[0] = 0x00;
- rx_status.vendor_radiotap_oui[1] = 0x00;
- rx_status.vendor_radiotap_oui[2] = 0x00;
- rx_status.vendor_radiotap_subns = 127;
- /*
- * Radiotap vendor namespaces can (and should) also be
- * split into fields by using the standard radiotap
- * presence bitmap mechanism. Use just BIT(0) here for
- * the presence bitmap.
- */
- rx_status.vendor_radiotap_bitmap = BIT(0);
- /* We have 8 bytes of (dummy) data */
- rx_status.vendor_radiotap_len = 8;
- /* For testing, also require it to be aligned */
- rx_status.vendor_radiotap_align = 8;
- /* push the data */
- memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
-#endif
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1087,7 +1089,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (data->channels == 1) {
+ if (!data->use_chanctx) {
channel = data->channel;
} else if (txi->hw_queue == 4) {
channel = data->tmp_chan;
@@ -1275,6 +1277,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
@@ -1350,7 +1355,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->channel = conf->chandef.chan;
- WARN_ON(data->channel && data->channels > 1);
+ WARN_ON(data->channel && data->use_chanctx);
data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
@@ -1936,7 +1941,8 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_regdomain *regd,
- bool reg_strict)
+ bool reg_strict, bool p2p_device,
+ bool use_chanctx)
{
int err;
u8 addr[ETH_ALEN];
@@ -1946,11 +1952,14 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
+ if (WARN_ON(channels > 1 && !use_chanctx))
+ return -EINVAL;
+
spin_lock_bh(&hwsim_radio_lock);
idx = hwsim_radio_idx++;
spin_unlock_bh(&hwsim_radio_lock);
- if (channels > 1)
+ if (use_chanctx)
ops = &mac80211_hwsim_mchan_ops;
hw = ieee80211_alloc_hw(sizeof(*data), ops);
if (!hw) {
@@ -1991,17 +2000,25 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
hw->wiphy->addresses = data->addresses;
data->channels = channels;
+ data->use_chanctx = use_chanctx;
data->idx = idx;
- if (data->channels > 1) {
+ if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- data->if_combination = hwsim_if_comb[0];
+ if (p2p_device)
+ data->if_combination = hwsim_if_comb_p2p_dev[0];
+ else
+ data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
+ } else if (p2p_device) {
+ hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(hwsim_if_comb_p2p_dev);
} else {
hw->wiphy->iface_combinations = hwsim_if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2034,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ if (p2p_device)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2046,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+ IEEE80211_HW_CHANCTX_STA_CSA;
if (rctbl)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
@@ -2141,7 +2162,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
debugfs_create_file("group", 0666, data->debugfs, data,
&hwsim_fops_group);
- if (data->channels == 1)
+ if (!data->use_chanctx)
debugfs_create_file("dfs_simulate_radar", 0222,
data->debugfs,
data, &hwsim_simulate_radar);
@@ -2407,10 +2428,17 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
const char *alpha2 = NULL;
const struct ieee80211_regdomain *regd = NULL;
bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ bool use_chanctx;
if (info->attrs[HWSIM_ATTR_CHANNELS])
chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+ use_chanctx = true;
+ else
+ use_chanctx = (chans > 1);
+
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
@@ -2422,7 +2450,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
+ p2p_device, use_chanctx);
}
static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2669,9 @@ static int __init init_mac80211_hwsim(void)
}
err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict);
+ regd, reg_strict,
+ support_p2p_device,
+ channels > 1);
if (err < 0)
goto out_free_radios;
}
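Note: the hwsim changes above keep two static interface-combination tables (with and without the P2P-Device entry) and pick one when a radio is created; when channel contexts are in use, a per-radio copy is taken and its num_different_channels is patched to the requested channel count. The following is a simplified sketch of that selection pattern; the struct and names are invented for illustration and are not the mac80211 types.

#include <stdio.h>

/* Toy stand-in for ieee80211_iface_combination: just the fields we need. */
struct iface_comb {
	const char *name;
	int num_different_channels;
};

static const struct iface_comb comb_no_p2p_dev[] = { { "no-p2p-dev", 1 } };
static const struct iface_comb comb_p2p_dev[]    = { { "p2p-dev",    1 } };

/* Mirrors the shape of the hwsim logic: pick a table based on p2p_device,
 * and copy + patch it when channel contexts are used. */
static void pick_combination(int p2p_device, int use_chanctx, int channels,
			     struct iface_comb *per_radio_copy)
{
	const struct iface_comb *base =
		p2p_device ? comb_p2p_dev : comb_no_p2p_dev;

	if (use_chanctx) {
		*per_radio_copy = base[0];
		per_radio_copy->num_different_channels = channels;
		printf("chanctx copy: %s, %d channels\n",
		       per_radio_copy->name,
		       per_radio_copy->num_different_channels);
	} else {
		printf("static table: %s\n", base[0].name);
	}
}

int main(void)
{
	struct iface_comb copy;

	pick_combination(1, 1, 2, &copy);
	pick_combination(0, 0, 1, &copy);
	return 0;
}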
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 2747cce5a269..c9d0315575ba 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -107,6 +107,10 @@ enum {
* (nla string, length 2)
* @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
* @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
+ * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
+ * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
+ * command to force use of channel contexts even when only a
+ * single channel is supported
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -126,6 +130,8 @@ enum {
HWSIM_ATTR_REG_HINT_ALPHA2,
HWSIM_ATTR_REG_CUSTOM_REG,
HWSIM_ATTR_REG_STRICT_REG,
+ HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+ HWSIM_ATTR_USE_CHANCTX,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
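Note: both new attributes are netlink flag attributes; the create-radio handler above treats a non-NULL info->attrs[...] slot as true, and derives use_chanctx from the channel count when the flag is absent. Below is a hedged userspace analogue of that presence check, using plain pointers instead of real nlattr parsing.

#include <stdbool.h>
#include <stdio.h>

/* Toy attribute table: a flag attribute is "present" when its slot is
 * non-NULL, which is all the hwsim handler checks for. */
enum { ATTR_SUPPORT_P2P_DEVICE, ATTR_USE_CHANCTX, ATTR_MAX };

int main(void)
{
	int dummy = 1;
	void *attrs[ATTR_MAX] = { [ATTR_SUPPORT_P2P_DEVICE] = &dummy };
	int channels = 1;

	bool p2p_device = attrs[ATTR_SUPPORT_P2P_DEVICE];
	/* forced on by the flag, otherwise derived from the channel count */
	bool use_chanctx = attrs[ATTR_USE_CHANCTX] ? true : (channels > 1);

	printf("p2p_device=%d use_chanctx=%d\n", p2p_device, use_chanctx);
	return 0;
}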
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5d9a8084665d..c92f27aa71ed 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -23,6 +23,31 @@
#include "main.h"
#include "11ac.h"
+/* Tables mapping the VHT MCS map values to the highest data rate (in Mbps)
+ * supported for long GI.
+ */
+static const u16 max_rate_lgi_80MHZ[8][3] = {
+ {0x124, 0x15F, 0x186}, /* NSS = 1 */
+ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
+ {0x36D, 0x41D, 0x492}, /* NSS = 3 */
+ {0x492, 0x57C, 0x618}, /* NSS = 4 */
+ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
+ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
+ {0x924, 0xAF8, 0xC30} /* NSS = 8 */
+};
+
+static const u16 max_rate_lgi_160MHZ[8][3] = {
+ {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
+ {0x492, 0x57C, 0x618}, /* NSS = 2 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
+ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
+ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
+ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+};
+
/* This function converts the 2-bit MCS map to the highest long GI
* VHT data rate.
*/
@@ -30,33 +55,10 @@ static u16
mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
u8 bands, u16 mcs_map)
{
- u8 i, nss, max_mcs;
+ u8 i, nss, mcs;
u16 max_rate = 0;
u32 usr_vht_cap_info = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- /* tables of the MCS map to the highest data rate (in Mbps)
- * supported for long GI
- */
- u16 max_rate_lgi_80MHZ[8][3] = {
- {0x124, 0x15F, 0x186}, /* NSS = 1 */
- {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
- {0x36D, 0x41D, 0x492}, /* NSS = 3 */
- {0x492, 0x57C, 0x618}, /* NSS = 4 */
- {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
- {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
- {0x924, 0xAF8, 0xC30} /* NSS = 8 */
- };
- u16 max_rate_lgi_160MHZ[8][3] = {
- {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
- {0x492, 0x57C, 0x618}, /* NSS = 2 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
- {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
- {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
- {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
- {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
- {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
- };
if (bands & BAND_AAC)
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
/* find the max NSS supported */
- nss = 0;
- for (i = 0; i < 8; i++) {
- max_mcs = (mcs_map >> (2 * i)) & 0x3;
- if (max_mcs < 3)
+ nss = 1;
+ for (i = 1; i <= 8; i++) {
+ mcs = GET_VHTNSSMCS(mcs_map, i);
+ if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
nss = i;
}
- max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+ mcs = GET_VHTNSSMCS(mcs_map, nss);
- /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
- if (max_mcs >= 3)
- max_mcs = 2;
+ /* mcs == NOT_SUPPORTED means no NSS was advertised (nss stays 1); default to MCS 0~9 */
+ if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
/* support 160 MHz */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS6 */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
} else {
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS3 */
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
}
return max_rate;
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
static void
mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (bands & BAND_A)
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
else
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
}
-static void
-mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
/* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
/* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
return;
}
@@ -192,7 +195,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
(u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
- mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
+ bss_desc->bss_band);
*buffer += sizeof(*vht_cap);
ret_len += sizeof(*vht_cap);
}
@@ -299,3 +303,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
return;
}
+
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+ vht_oper = bss_desc->bcn_vht_oper;
+
+ if (!bss_desc->bcn_vht_cap || !vht_oper)
+ return false;
+
+ if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
+ return false;
+
+ return true;
+}
+
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw)
+{
+ u8 center_freq_idx = 0;
+
+ if (band & BAND_AAC) {
+ switch (pri_chan) {
+ case 36:
+ case 40:
+ case 44:
+ case 48:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 42;
+ break;
+ case 52:
+ case 56:
+ case 60:
+ case 64:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 58;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 50;
+ break;
+ case 100:
+ case 104:
+ case 108:
+ case 112:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 106;
+ break;
+ case 116:
+ case 120:
+ case 124:
+ case 128:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 122;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 114;
+ break;
+ case 132:
+ case 136:
+ case 140:
+ case 144:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 138;
+ break;
+ case 149:
+ case 153:
+ case 157:
+ case 161:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 155;
+ break;
+ default:
+ center_freq_idx = 42;
+ }
+ }
+
+ return center_freq_idx;
+}
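Note: the rework above walks the 16-bit VHT MCS map, which packs one 2-bit MCS set per spatial stream, using GET_VHTNSSMCS(), and then indexes the now-static rate tables with nss - 1. Below is a standalone sketch of that extraction; the GET_NSS_MCS macro is an assumption about the usual (nss - 1) * 2 bit layout, not a copy of the driver's macro.

#include <stdio.h>
#include <stdint.h>

#define VHT_MCS_NOT_SUPPORTED	3	/* same value as IEEE80211_VHT_MCS_NOT_SUPPORTED */

/* Assumed layout: bits (nss-1)*2 .. (nss-1)*2+1 hold the MCS set for NSS n. */
#define GET_NSS_MCS(map, nss)	(((map) >> (2 * ((nss) - 1))) & 0x3)

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* NSS1 and NSS2: MCS 0-9, rest unsupported */
	int nss = 1, i;

	for (i = 1; i <= 8; i++)
		if (GET_NSS_MCS(mcs_map, i) < VHT_MCS_NOT_SUPPORTED)
			nss = i;	/* remember the highest supported NSS */

	printf("max NSS = %d, MCS set = %d\n", nss, GET_NSS_MCS(mcs_map, nss));
	return 0;
}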
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 7c2c69b5b3eb..0b02cb6cfcb4 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_11ac_vht_cfg *cfg);
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands);
#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 8d683070bdb3..e76b0db4e3e6 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -73,8 +73,8 @@ static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
{
u32 enable = flag;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
}
/* This functions processes TLV buffer for a pending BSS Join command.
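Note: this hunk shows the pattern repeated throughout the mwifiex changes: mwifiex_send_cmd_sync() and mwifiex_send_cmd_async() collapse into a single mwifiex_send_cmd() whose final argument selects whether the caller waits for the firmware response. Below is a generic sketch of that API shape; the function and command IDs are hypothetical, not the driver's.

#include <stdbool.h>
#include <stdio.h>

/* One entry point with a 'sync' flag instead of two near-identical wrappers. */
static int send_cmd(int cmd_id, const void *data, bool sync)
{
	(void)data;
	printf("queue cmd 0x%x\n", cmd_id);
	if (sync)
		printf("  ...blocking until the response for 0x%x arrives\n",
		       cmd_id);
	return 0;
}

int main(void)
{
	/* fire-and-forget, like the ADDBA/DELBA requests */
	send_cmd(0xce, NULL, false);
	/* wait for completion, like the SNMP MIB set in 11h.c */
	send_cmd(0x16, NULL, true);
	return 0;
}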
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 7db1a89fdd95..d14ead8beca8 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -34,22 +34,26 @@
*
* RD responder bit to set to clear in the extended capability header.
*/
-void
-mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
- struct mwifiex_ie_types_htcap *ht_cap)
+int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
+ struct ieee80211_ht_cap *ht_cap)
{
- uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
+ uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
struct ieee80211_supported_band *sband =
priv->wdev->wiphy->bands[radio_type];
- ht_cap->ht_cap.ampdu_params_info =
+ if (WARN_ON_ONCE(!sband)) {
+ dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ return -EINVAL;
+ }
+
+ ht_cap->ampdu_params_info =
(sband->ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
((sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
- memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
+ memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
(priv->adapter->sec_chan_offset !=
IEEE80211_HT_PARAM_CHA_SEC_NONE)))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
+ SETHT_MCS32(ht_cap->mcs.rx_mask);
/* Clear RD responder bit */
ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
- ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
- ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+ ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
+ ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+
+ if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
+ ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
+
+ return 0;
}
/*
@@ -150,28 +159,34 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
int tid;
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+ u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
& SSN_MASK);
- tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
- & IEEE80211_ADDBA_PARAM_TID_MASK)
- >> BLOCKACKPARAM_TID_POS;
- if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
- tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid,
- add_ba_rsp->peer_mac_addr);
- if (tx_ba_tbl) {
- dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
- tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
- } else {
- dev_err(priv->adapter->dev, "BA stream not created\n");
- }
- } else {
+ tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
+ >> BLOCKACKPARAM_TID_POS;
+ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
TYPE_DELBA_SENT, true);
if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
priv->aggr_prio_tbl[tid].ampdu_ap =
BA_STREAM_NOT_ALLOWED;
+ return 0;
+ }
+
+ tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
+ if (tx_ba_tbl) {
+ dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+ tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
+ if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+ priv->add_ba_param.tx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ tx_ba_tbl->amsdu = true;
+ else
+ tx_ba_tbl->amsdu = false;
+ } else {
+ dev_err(priv->adapter->dev, "BA stream not created\n");
}
return 0;
@@ -311,7 +326,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
*buffer += sizeof(struct mwifiex_ie_types_htcap);
ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -527,16 +542,39 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
{
struct host_cmd_ds_11n_addba_req add_ba_req;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
+ u16 block_ack_param_set;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
- add_ba_req.block_ack_param_set = cpu_to_le16(
- (u16) ((tid << BLOCKACKPARAM_TID_POS) |
- (priv->add_ba_param.
- tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
- IMMEDIATE_BLOCK_ACK));
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ }
+
+ block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
+ tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
+ IMMEDIATE_BLOCK_ACK);
+
+ /* enable AMSDU inside AMPDU */
+ if (priv->add_ba_param.tx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ block_ack_param_set |= BLOCKACKPARAM_AMSDU_SUPP_MASK;
+
+ add_ba_req.block_ack_param_set = cpu_to_le16(block_ack_param_set);
add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
++dialog_tok;
@@ -548,8 +586,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
/* We don't wait for the response of this command */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_REQ,
- 0, 0, &add_ba_req);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_REQ,
+ 0, 0, &add_ba_req, false);
return ret;
}
@@ -576,8 +614,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
/* We don't wait for the response of this command */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA,
- HostCmd_ACT_GEN_SET, 0, &delba);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA,
+ HostCmd_ACT_GEN_SET, 0, &delba, false);
return ret;
}
@@ -651,6 +689,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
__func__, rx_reo_tbl->tid);
memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
+ rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
rx_reo_tbl++;
count++;
if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
@@ -706,5 +745,8 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
}
+ priv->add_ba_param.tx_amsdu = true;
+ priv->add_ba_param.rx_amsdu = true;
+
return;
}
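Note: mwifiex_send_addba() now assembles block_ack_param_set explicitly: TID, the (possibly TDLS-enlarged) window size, the immediate-BA policy bit and, newly, the A-MSDU-in-A-MPDU bit. Below is a standalone sketch of that packing; the *_POS/*_SUPP values follow the 802.11 ADDBA parameter layout (bit 0 A-MSDU, bit 1 policy, bits 2-5 TID, bits 6-15 buffer size) and are assumptions rather than the driver's definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define BA_AMSDU_SUPP		0x0001	/* bit 0: A-MSDU inside A-MPDU */
#define BA_IMMEDIATE		0x0002	/* bit 1: immediate block ack policy */
#define BA_TID_POS		2	/* bits 2..5: TID */
#define BA_WINSIZE_POS		6	/* bits 6..15: buffer (window) size */

static uint16_t build_ba_param_set(unsigned tid, unsigned win_size, bool amsdu)
{
	uint16_t set = (tid << BA_TID_POS) |
		       (win_size << BA_WINSIZE_POS) |
		       BA_IMMEDIATE;

	if (amsdu)
		set |= BA_AMSDU_SUPP;
	return set;
}

int main(void)
{
	uint16_t set = build_ba_param_set(6, 64, true);

	printf("param set = 0x%04x (tid=%d win=%d amsdu=%d)\n", (unsigned)set,
	       (set >> BA_TID_POS) & 0xf, set >> BA_WINSIZE_POS,
	       set & BA_AMSDU_SUPP);
	return 0;
}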
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 375db01442bf..40b007a00f4b 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
- struct mwifiex_ie_types_htcap *);
+int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
+ struct ieee80211_ht_cap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
u16 action, int *htcap_cfg);
void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,46 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
-/*
- * This function checks whether AMPDU is allowed or not for a particular TID.
- */
static inline u8
-mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
+mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
{
- return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
- ? true : false);
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
+
+ if (unlikely(!node))
+ return false;
+
+ return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
+}
+
+/* This function checks whether AMSDU is allowed for BA stream. */
+static inline u8
+mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ struct mwifiex_tx_ba_stream_tbl *tx_tbl;
+
+ tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
+ if (tx_tbl)
+ return tx_tbl->amsdu;
+
+ return false;
+}
+
+/* This function checks whether AMPDU is allowed or not for a particular TID. */
+static inline u8
+mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+ } else {
+ if (ptr->tdls_link)
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+
+ return (priv->aggr_prio_tbl[tid].ampdu_ap !=
+ BA_STREAM_NOT_ALLOWED) ? true : false;
+ }
}
/*
@@ -165,4 +197,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
return node->is_11n_enabled;
}
+
+static inline u8
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+{
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
+ if (node)
+ return node->is_11n_enabled;
+
+ return false;
+}
#endif /* !_MWIFIEX_11N_H_ */
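Note: the new inline helpers decide A-MPDU and A-MSDU eligibility per receiver address and TID by consulting the TX BA stream table (or the per-station node) instead of only the global priority table. Below is a condensed, hypothetical model of that lookup, with a plain array standing in for the driver's list.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ba_stream {
	unsigned char ra[6];	/* receiver address */
	int tid;
	bool amsdu;		/* negotiated A-MSDU-in-A-MPDU support */
};

static struct ba_stream streams[] = {
	{ { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, 6, true },
};

/* Return the negotiated amsdu flag for (ra, tid), false if no BA stream. */
static bool amsdu_in_ampdu_allowed(const unsigned char *ra, int tid)
{
	size_t i;

	for (i = 0; i < sizeof(streams) / sizeof(streams[0]); i++)
		if (streams[i].tid == tid && !memcmp(streams[i].ra, ra, 6))
			return streams[i].amsdu;
	return false;
}

int main(void)
{
	unsigned char peer[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("tid 6: %d, tid 0: %d\n",
	       amsdu_in_ampdu_allowed(peer, 6), amsdu_in_ampdu_allowed(peer, 0));
	return 0;
}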
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index ada809f576fe..0c3571f830b0 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -26,6 +26,56 @@
#include "11n.h"
#include "11n_rxreorder.h"
+/* This function dispatches an A-MSDU packet and forwards it to the
+ * kernel/upper layer.
+ */
+static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
+ int ret;
+
+ if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+ struct sk_buff_head list;
+ struct sk_buff *rx_skb;
+
+ __skb_queue_head_init(&list);
+
+ skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
+ skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
+
+ ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+ priv->wdev->iftype, 0, false);
+
+ while (!skb_queue_empty(&list)) {
+ rx_skb = __skb_dequeue(&list);
+ ret = mwifiex_recv_packet(priv, rx_skb);
+ if (ret == -1)
+ dev_err(priv->adapter->dev,
+ "Rx of A-MSDU failed");
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/* This function processes the rx packet and forwards it to the
+ * kernel/upper layer.
+ */
+static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+{
+ int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
+
+ if (!ret)
+ return 0;
+
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ return mwifiex_handle_uap_rx_forward(priv, payload);
+
+ return mwifiex_process_rx_packet(priv, payload);
+}
+
/*
* This function dispatches all packets in the Rx reorder table until the
* start window.
@@ -35,8 +85,9 @@
* circular buffer.
*/
static void
-mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
- struct mwifiex_rx_reorder_tbl *tbl, int start_win)
+mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+ struct mwifiex_rx_reorder_tbl *tbl,
+ int start_win)
{
int pkt_to_send, i;
void *rx_tmp_ptr;
@@ -54,12 +105,8 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- if (rx_tmp_ptr) {
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
- else
- mwifiex_process_rx_packet(priv, rx_tmp_ptr);
- }
+ if (rx_tmp_ptr)
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -101,11 +148,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
- else
- mwifiex_process_rx_packet(priv, rx_tmp_ptr);
+ mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -135,14 +178,15 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl)
{
unsigned long flags;
+ int start_win;
if (!tbl)
return;
- mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) &
- (MAX_TID_VALUE - 1));
+ start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
- del_timer(&tbl->timer_context.timer);
+ del_timer_sync(&tbl->timer_context.timer);
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
@@ -228,17 +272,17 @@ mwifiex_flush_data(unsigned long context)
{
struct reorder_tmr_cnxt *ctx =
(struct reorder_tmr_cnxt *) context;
- int start_win;
+ int start_win, seq_num;
- start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);
+ seq_num = mwifiex_11n_find_last_seq_num(ctx->ptr);
- if (start_win < 0)
+ if (seq_num < 0)
return;
- dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
- mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
- (ctx->ptr->start_win + start_win + 1) &
- (MAX_TID_VALUE - 1));
+ dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+ start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
+ mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
+ start_win);
}
/*
@@ -267,7 +311,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
*/
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
- mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
return;
}
/* if !tbl then create one */
@@ -279,6 +323,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->tid = tid;
memcpy(new_node->ta, ta, ETH_ALEN);
new_node->start_win = seq_num;
+ new_node->init_win = seq_num;
+ new_node->flags = 0;
if (mwifiex_queuing_ra_based(priv)) {
dev_dbg(priv->adapter->dev,
@@ -290,15 +336,20 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
last_seq = node->rx_seq[tid];
}
} else {
- last_seq = priv->rx_seq[tid];
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ last_seq = node->rx_seq[tid];
+ else
+ last_seq = priv->rx_seq[tid];
}
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
- last_seq >= new_node->start_win)
+ last_seq >= new_node->start_win) {
new_node->start_win = last_seq + 1;
+ new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
+ }
new_node->win_size = win_size;
- new_node->flags = 0;
new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
GFP_KERNEL);
@@ -358,10 +409,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*cmd_addba_req)
{
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv,
+ cmd_addba_req->peer_mac_addr);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ }
+
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
@@ -376,10 +445,12 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
>> BLOCKACKPARAM_TID_POS;
add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
- /* We donot support AMSDU inside AMPDU, hence reset the bit */
- block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
- block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
- BLOCKACKPARAM_WINSIZE_POS);
+
+ /* If we don't support AMSDU inside AMPDU, reset the bit */
+ if (!priv->add_ba_param.rx_amsdu ||
+ (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
+ block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
+ block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
@@ -431,33 +502,46 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl;
int start_win, end_win, win_size;
u16 pkt_index;
+ bool init_window_shift = false;
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
- if (pkt_type != PKT_TYPE_BAR) {
- if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
- mwifiex_handle_uap_rx_forward(priv, payload);
- else
- mwifiex_process_rx_packet(priv, payload);
- }
+ if (pkt_type != PKT_TYPE_BAR)
+ mwifiex_11n_dispatch_pkt(priv, payload);
return 0;
}
+
+ if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+ mwifiex_11n_dispatch_pkt(priv, payload);
+ return 0;
+ }
+
start_win = tbl->start_win;
win_size = tbl->win_size;
end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
- del_timer(&tbl->timer_context.timer);
+ if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
+ init_window_shift = true;
+ tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
+ }
mod_timer(&tbl->timer_context.timer,
jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
- /*
- * If seq_num is less then starting win then ignore and drop the
- * packet
- */
if (tbl->flags & RXREOR_FORCE_NO_DROP) {
dev_dbg(priv->adapter->dev,
"RXREOR_FORCE_NO_DROP when HS is activated\n");
tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+ } else if (init_window_shift && seq_num < start_win &&
+ seq_num >= tbl->init_win) {
+ dev_dbg(priv->adapter->dev,
+ "Sender TID sequence number reset %d->%d for SSN %d\n",
+ start_win, seq_num, tbl->init_win);
+ tbl->start_win = start_win = seq_num;
+ end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
} else {
+ /*
+ * If seq_num is less than the starting window, ignore and
+ * drop the packet
+ */
if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
if (seq_num >= ((start_win + TWOPOW11) &
(MAX_TID_VALUE - 1)) &&
@@ -485,7 +569,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
+ mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -576,16 +660,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
* Check if we had rejected the ADDBA, if yes then do not create
* the stream
*/
- if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
- win_size = (block_ack_param_set &
- IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
- >> BLOCKACKPARAM_WINSIZE_POS;
-
- dev_dbg(priv->adapter->dev,
- "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
- add_ba_rsp->peer_mac_addr, tid,
- add_ba_rsp->ssn, win_size);
- } else {
+ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
add_ba_rsp->peer_mac_addr, tid);
@@ -593,8 +668,28 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
+
+ return 0;
}
+ win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
+ >> BLOCKACKPARAM_WINSIZE_POS;
+
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+ add_ba_rsp->peer_mac_addr);
+ if (tbl) {
+ if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+ priv->add_ba_param.rx_amsdu &&
+ (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+ tbl->amsdu = true;
+ else
+ tbl->amsdu = false;
+ }
+
+ dev_dbg(priv->adapter->dev,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+ add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
+
return 0;
}
@@ -615,7 +710,7 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
delba.del_ba_param_set |= cpu_to_le16(
(u16) event->origninator << DELBA_INITIATOR_POS);
delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
}
/*
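Note: the reorder code above maintains a window of win_size slots starting at start_win inside a wrapping sequence space (MAX_TID_VALUE), and "dispatching until a start window" means flushing the buffered prefix and rotating the remainder to the front. Below is a compact model of that sliding window; the sequence space is shrunk to keep the demo readable, and the names are illustrative only.

#include <stdio.h>

#define SEQ_SPACE	16	/* stand-in for MAX_TID_VALUE; the real space is larger */
#define WIN_SIZE	4

static int slots[WIN_SIZE];	/* 0 = empty, otherwise a queued packet id */
static int start_win = 14;	/* window currently covers seq 14, 15, 0, 1 */

/* Flush every buffered slot below new_start and rotate the window there,
 * mirroring mwifiex_11n_dispatch_pkt_until_start_win(). */
static void dispatch_until(int new_start)
{
	int dist = (new_start - start_win + SEQ_SPACE) % SEQ_SPACE;
	int i;

	for (i = 0; i < dist && i < WIN_SIZE; i++) {
		if (slots[i])
			printf("dispatch pkt %d (seq %d)\n", slots[i],
			       (start_win + i) % SEQ_SPACE);
		slots[i] = 0;
	}
	/* shift the remaining entries to the front of the window */
	for (i = 0; i + dist < WIN_SIZE; i++) {
		slots[i] = slots[i + dist];
		slots[i + dist] = 0;
	}
	start_win = new_start;
}

int main(void)
{
	slots[0] = 101;		/* seq 14 */
	slots[2] = 103;		/* seq 0  */
	dispatch_until(0);	/* flushes seq 14 and 15, keeps seq 0 buffered */
	printf("new start_win=%d, slot0=%d\n", start_win, slots[0]);
	return 0;
}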
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 4064041ac852..0fc76e4a60f8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -42,7 +42,8 @@
#define BA_SETUP_PACKET_OFFSET 16
enum mwifiex_rxreor_flags {
- RXREOR_FORCE_NO_DROP = 1<<0,
+ RXREOR_FORCE_NO_DROP = 1<<0,
+ RXREOR_INIT_WINDOW_SHIFT = 1<<1,
};
static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index a42a506fd32b..2aa208ffbe23 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-y += ethtool.o
mwifiex-y += 11h.o
+mwifiex-y += tdls.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 3d64613ebb29..b9242c3dca43 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -131,7 +131,7 @@ info
hs_configured = <0/1, host sleep not configured/configured>
hs_activated = <0/1, extended host sleep not activated/activated>
num_tx_timeout = <number of Tx timeout>
- num_cmd_timeout = <number of timeout commands>
+ is_cmd_timedout = <0/1, command timeout not occurred/occurred>
timeout_cmd_id = <command id of the last timeout command>
timeout_cmd_act = <command action of the last timeout command>
last_cmd_id = <command id of the last several commands sent to device>
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8bfc07cd330e..21ee27ab7b74 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -252,9 +252,9 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
if (mask != priv->mgmt_frame_mask) {
priv->mgmt_frame_mask = mask;
- mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
- HostCmd_ACT_GEN_SET, 0,
- &priv->mgmt_frame_mask);
+ mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask, false);
wiphy_dbg(wiphy, "info: mgmt frame registered\n");
}
}
@@ -515,8 +515,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(wiphy, "11D: setting domain info in FW\n");
return -1;
}
@@ -580,9 +580,9 @@ mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
frag_thr > MWIFIEX_FRAG_MAX_VALUE)
frag_thr = MWIFIEX_FRAG_MAX_VALUE;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
- &frag_thr);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
+ &frag_thr, true);
}
/*
@@ -597,9 +597,9 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
rts_thr = MWIFIEX_RTS_MAX_VALUE;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, RTS_THRESH_I,
- &rts_thr);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, RTS_THRESH_I,
+ &rts_thr, true);
}
/*
@@ -637,20 +637,19 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
bss_started = priv->bss_started;
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0,
- NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL, true);
if (ret) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
kfree(bss_cfg);
return ret;
}
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg,
+ false);
kfree(bss_cfg);
@@ -662,10 +661,9 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (!bss_started)
break;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0,
- NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL, false);
if (ret) {
wiphy_err(wiphy, "Failed to start BSS\n");
return ret;
@@ -700,8 +698,8 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
return 0;
@@ -721,13 +719,13 @@ mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
return -1;
mode = P2P_MODE_DEVICE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
mode = P2P_MODE_CLIENT;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
return 0;
@@ -747,13 +745,13 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
return -1;
mode = P2P_MODE_DEVICE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
mode = P2P_MODE_GO;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
- HostCmd_ACT_GEN_SET, 0, &mode))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode, true))
return -1;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
@@ -853,8 +851,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
return ret;
}
@@ -942,8 +940,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
/* Get signal information from the firmware */
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, true)) {
dev_err(priv->adapter->dev, "failed to get signal information\n");
return -EFAULT;
}
@@ -954,9 +952,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
}
/* Get DTIM period information from firmware */
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
- &priv->dtim_period);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
+ &priv->dtim_period, true);
mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
@@ -1160,9 +1158,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
enum ieee80211_band band;
+ struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"Can not set Tx data rate in disconnected state\n");
return -EINVAL;
}
@@ -1183,11 +1182,18 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
/* Fill HT MCS rates */
bitmap_rates[2] = mask->control[band].ht_mcs[0];
- if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_SET, 0, bitmap_rates);
+ /* Fill VHT MCS rates */
+ if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ bitmap_rates[10] = mask->control[band].vht_mcs[0];
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ bitmap_rates[11] = mask->control[band].vht_mcs[1];
+ }
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_SET, 0, bitmap_rates, true);
}
/*
@@ -1216,14 +1222,14 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, &subsc_evt);
+ return mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, &subsc_evt, true);
} else {
subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, &subsc_evt);
+ return mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, &subsc_evt, true);
}
return 0;
@@ -1276,10 +1282,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (!mac || is_broadcast_ether_addr(mac)) {
wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
list_for_each_entry(sta_node, &priv->sta_list, list) {
- if (mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr, true))
return -1;
mwifiex_uap_del_sta_data(priv, sta_node);
}
@@ -1289,10 +1294,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
sta_node = mwifiex_get_sta_entry(priv, mac);
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
if (sta_node) {
- if (mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_UAP_STA_DEAUTH,
- HostCmd_ACT_GEN_SET, 0,
- sta_node->mac_addr))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+ HostCmd_ACT_GEN_SET, 0,
+ sta_node->mac_addr, true))
return -1;
mwifiex_uap_del_sta_data(priv, sta_node);
}
@@ -1328,13 +1332,40 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
tx_ant = RF_ANTENNA_AUTO;
rx_ant = RF_ANTENNA_AUTO;
}
+ } else {
+ struct ieee80211_sta_ht_cap *ht_info;
+ int rx_mcs_supp;
+ enum ieee80211_band band;
+
+ if ((tx_ant == 0x1 && rx_ant == 0x1)) {
+ adapter->user_dev_mcs_support = HT_STREAM_1X1;
+ if (adapter->is_hw_11ac_capable)
+ adapter->usr_dot_11ac_mcs_support =
+ MWIFIEX_11AC_MCS_MAP_1X1;
+ } else {
+ adapter->user_dev_mcs_support = HT_STREAM_2X2;
+ if (adapter->is_hw_11ac_capable)
+ adapter->usr_dot_11ac_mcs_support =
+ MWIFIEX_11AC_MCS_MAP_2X2;
+ }
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!adapter->wiphy->bands[band])
+ continue;
+
+ ht_info = &adapter->wiphy->bands[band]->ht_cap;
+ rx_mcs_supp =
+ GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+ memset(&ht_info->mcs, 0, adapter->number_of_antenna);
+ memset(&ht_info->mcs, 0xff, rx_mcs_supp);
+ }
}
ant_cfg.tx_ant = tx_ant;
ant_cfg.rx_ant = rx_ant;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
- HostCmd_ACT_GEN_SET, 0, &ant_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
+ HostCmd_ACT_GEN_SET, 0, &ant_cfg, true);
}
/* cfg80211 operation handler for stop ap.
@@ -1349,8 +1380,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
priv->ap_11n_enabled = 0;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
return -1;
}
@@ -1416,9 +1447,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_40)
- config_bands |= BAND_GAC;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
@@ -1464,16 +1492,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
}
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
kfree(bss_cfg);
return -1;
}
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg, false)) {
wiphy_err(wiphy, "Failed to set the SSID\n");
kfree(bss_cfg);
return -1;
@@ -1481,8 +1509,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
kfree(bss_cfg);
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(wiphy, "Failed to start the BSS\n");
return -1;
}
@@ -1492,9 +1520,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true))
return -1;
return 0;
@@ -1583,8 +1611,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
* the function notifies the CFG802.11 subsystem of the new BSS connection.
*/
static int
-mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
- u8 *bssid, int mode, struct ieee80211_channel *channel,
+mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ const u8 *ssid, const u8 *bssid, int mode,
+ struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
struct cfg80211_ssid req_ssid;
@@ -1881,7 +1910,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
params->privacy);
done:
if (!ret) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
+ params->chandef.chan, GFP_KERNEL);
dev_dbg(priv->adapter->dev,
"info: joined/created adhoc network with bssid"
" %pM successfully\n", priv->cfg_bssid);
@@ -2070,10 +2100,10 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
else
ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
- if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap))
- ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
+ ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
else
- ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
@@ -2098,8 +2128,8 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
- rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support);
- /* Set MCS for 1x1 */
+ rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+ /* Set MCS for 1x1/2x2 */
memset(mcs, 0xff, rx_mcs_supp);
/* Clear all the other values */
memset(&mcs[rx_mcs_supp], 0,
@@ -2460,9 +2490,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
MWIFIEX_CRITERIA_UNICAST |
MWIFIEX_CRITERIA_MULTICAST;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
- HostCmd_ACT_GEN_SET, 0,
- &mef_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
kfree(mef_entry);
return ret;
@@ -2574,9 +2603,9 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
if (!coalesce) {
dev_dbg(adapter->dev,
"Disable coalesce and reset all previous rules\n");
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
- HostCmd_ACT_GEN_SET, 0,
- &coalesce_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &coalesce_cfg, true);
}
coalesce_cfg.num_of_rules = coalesce->n_rules;
@@ -2591,8 +2620,172 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
}
}
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
- HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true);
+}
+
+/* cfg80211 ops handler for tdls_mgmt.
+ * Function prepares TDLS action frame packets and forwards them to FW
+ */
+static int
+mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, u32 peer_capability,
+ const u8 *extra_ies, size_t extra_ies_len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Request to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
+ peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Request to %pM\n", peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Response to %pM\n", peer);
+ ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation action)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ dev_dbg(priv->adapter->dev,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
+
+ switch (action) {
+ case NL80211_TDLS_ENABLE_LINK:
+ action = MWIFIEX_TDLS_ENABLE_LINK;
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ action = MWIFIEX_TDLS_DISABLE_LINK;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: teardown from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_SETUP:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: setup from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* shouldn't happen!*/
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: discovery from driver not supported\n");
+ return -EINVAL;
+ default:
+ dev_err(priv->adapter->dev,
+ "tdls_oper: operation not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return mwifiex_tdls_oper(priv, peer, action);
+}
+
+static int
+mwifiex_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
+}
+
+static int
+mwifiex_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ int ret;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ /* we support change_station handler only for TDLS peers*/
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ priv->sta_params = params;
+
+ ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
+ priv->sta_params = NULL;
+
+ return ret;
}
/* station cfg80211 operations */
@@ -2630,6 +2823,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
+ .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
+ .tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .add_station = mwifiex_cfg80211_add_station,
+ .change_station = mwifiex_cfg80211_change_station,
};
#ifdef CONFIG_PM
@@ -2715,6 +2912,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
wiphy->regulatory_flags |=
REGULATORY_CUSTOM_REG |
REGULATORY_STRICT_REG;
@@ -2736,7 +2938,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
- NL80211_FEATURE_LOW_PRIORITY_SCAN;
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_NEED_OBSS_SCAN;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -2767,17 +2970,17 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
country_code);
}
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr, true);
wiphy->frag_threshold = thr;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr, true);
wiphy->rts_threshold = thr;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry, true);
wiphy->retry_short = (u8) retry;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry, true);
wiphy->retry_long = (u8) retry;
adapter->wiphy = wiphy;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 9eefacbc844b..0ddec3d4b059 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
+/* For every mcs_rate line, the first 8 entries are for stream 1x1,
+ * and all 16 entries are for stream 2x2.
+ */
+static const u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+};
+
+/* AC rates */
+static const u16 ac_mcs_rate_nss1[8][10] = {
+ /* LG 160M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 160M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 80M */
+ { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+ 0x249, 0x2BE, 0x30C },
+
+ /* SG 80M */
+ { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+ 0x28A, 0x30C, 0x363 },
+
+ /* LG 40M */
+ { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+ 0x10E, 0x144, 0x168 },
+
+ /* SG 40M */
+ { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+ 0x12C, 0x168, 0x190 },
+
+ /* LG 20M */
+ { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+ /* SG 20M */
+ { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+};
+
+/* NSS2 note: the values in the table are twice the actual rate */
+static const u16 ac_mcs_rate_nss2[8][10] = {
+ /* LG 160M */
+ { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+ 0x924, 0xAF8, 0xC30 },
+
+ /* SG 160M */
+ { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+ 0xA28, 0xC30, 0xD8B },
+
+ /* LG 80M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 80M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 40M */
+ { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+ 0x21C, 0x288, 0x2D0 },
+
+ /* SG 40M */
+ { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+ 0x258, 0x2D0, 0x320 },
+
+ /* LG 20M */
+ { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+ 0x138, 0x00 },
+
+ /* SG 20M */
+ { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+ 0x15B, 0x00 },
+};
+
struct region_code_mapping {
u8 code;
u8 region[IEEE80211_COUNTRY_STRING_LEN];
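To make the hoisted rate tables above concrete, here is a small, hypothetical lookup sketch (not part of the patch; the 0.5 Mbps unit is inferred from the values, e.g. 0x82 = 130 at [2][7] matches the 65 Mbps of MCS 7, 20 MHz, long GI):

/* Illustrative only: rows 0/1 of mcs_rate are long/short GI at 40 MHz,
 * rows 2/3 are long/short GI at 20 MHz; columns 0-7 are the 1x1 MCS
 * indexes, columns 8-15 the additional 2x2 ones. Values appear to be
 * in 0.5 Mbps units, so multiply by 500 to get kbps.
 */
static u32 example_ht_rate_kbps(u8 gi_bw_row, u8 mcs_index)
{
	if (gi_bw_row >= 4 || mcs_index >= 16)
		return 0;
	return mcs_rate[gi_bw_row][mcs_index] * 500;
}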
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /*
- * For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
- /* AC rates */
- u16 ac_mcs_rate_nss1[8][10] = {
- /* LG 160M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 160M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 80M */
- { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
- 0x249, 0x2BE, 0x30C },
-
- /* SG 80M */
- { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
- 0x28A, 0x30C, 0x363 },
-
- /* LG 40M */
- { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
- 0x10E, 0x144, 0x168 },
-
- /* SG 40M */
- { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
- 0x12C, 0x168, 0x190 },
-
- /* LG 20M */
- { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
-
- /* SG 20M */
- { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
- };
- /* NSS2 note: the value in the table is 2 multiplier of the actual
- * rate
- */
- u16 ac_mcs_rate_nss2[8][10] = {
- /* LG 160M */
- { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
- 0x924, 0xAF8, 0xC30 },
-
- /* SG 160M */
- { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
- 0xA28, 0xC30, 0xD8B },
-
- /* LG 80M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 80M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 40M */
- { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
- 0x21C, 0x288, 0x2D0 },
-
- /* SG 40M */
- { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
- 0x258, 0x2D0, 0x320 },
-
- /* LG 20M */
- { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
- 0x138, 0x00 },
-
- /* SG 20M */
- { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
- 0x15B, 0x00 },
- };
u32 rate = 0;
u8 mcs_index = 0;
u8 bw = 0;
@@ -252,28 +252,8 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
u32 mcs_num_supp =
- (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
+ (priv->adapter->user_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (priv->adapter->is_hw_11ac_capable)
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
break;
case BAND_G:
case BAND_G | BAND_GN:
- case BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_g\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
- case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
- BAND_AAC | BAND_GAC:
case BAND_B | BAND_G | BAND_GN:
- case BAND_B | BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_bg\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
sizeof(supported_rates_a));
break;
case BAND_GN:
- case BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_n\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1ddc8b2e3722..1062c918a7bf 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -37,13 +37,12 @@
static void
mwifiex_init_cmd_node(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node,
- u32 cmd_oid, void *data_buf)
+ u32 cmd_oid, void *data_buf, bool sync)
{
cmd_node->priv = priv;
cmd_node->cmd_oid = cmd_oid;
- if (priv->adapter->cmd_wait_q_required) {
- cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
- priv->adapter->cmd_wait_q_required = false;
+ if (sync) {
+ cmd_node->wait_q_enabled = true;
cmd_node->cmd_wait_q_woken = false;
cmd_node->condition = &cmd_node->cmd_wait_q_woken;
}
@@ -166,8 +165,10 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
dev_err(adapter->dev,
"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
- mwifiex_complete_cmd(adapter, cmd_node);
+ if (cmd_node->wait_q_enabled)
+ mwifiex_complete_cmd(adapter, cmd_node);
mwifiex_recycle_cmd_node(adapter, cmd_node);
+ queue_work(adapter->workqueue, &adapter->main_work);
return -1;
}
@@ -276,11 +277,11 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ adapter->seq_num++;
sleep_cfm_buf->seq_num =
cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num, priv->bss_num,
priv->bss_type)));
- adapter->seq_num++;
if (adapter->iface_type == MWIFIEX_USB) {
sleep_cfm_tmp =
@@ -480,28 +481,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
}
/*
- * This function is used to send synchronous command to the firmware.
- *
- * it allocates a wait queue for the command and wait for the command
- * response.
- */
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf)
-{
- int ret = 0;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- adapter->cmd_wait_q_required = true;
-
- ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
- data_buf);
-
- return ret;
-}
-
-
-/*
- * This function prepares a command and asynchronously send it to the firmware.
+ * This function prepares a command and sends it to the firmware.
*
* Preparation includes -
* - Sanity tests to make sure the card is still present or the FW
@@ -511,8 +491,8 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
* - Fill up the non-default parameters and buffer pointers
* - Add the command to pending queue
*/
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf)
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -529,11 +509,21 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
return -1;
}
+ if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+ return -1;
+ }
+
if (adapter->surprise_removed) {
dev_err(adapter->dev, "PREP_CMD: card is removed\n");
return -1;
}
+ if (adapter->is_cmd_timedout) {
+ dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+ return -1;
+ }
+
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
if (cmd_no != HostCmd_CMD_FUNC_INIT) {
dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
@@ -550,7 +540,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Initialize the command node */
- mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf);
+ mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
if (!cmd_node->cmd_skb) {
dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
@@ -595,7 +585,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Send command */
- if (cmd_no == HostCmd_CMD_802_11_SCAN) {
+ if (cmd_no == HostCmd_CMD_802_11_SCAN ||
+ cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
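For readers following the API change: the old mwifiex_send_cmd_sync()/mwifiex_send_cmd_async() pair is folded into a single mwifiex_send_cmd() whose trailing bool selects whether the caller blocks on the command wait queue. A minimal, hypothetical caller (illustrative sketch, not part of this patch; the helper name is made up) might look like:

/* Illustrative only: send MAC_CONTROL either synchronously or not.
 * 'true' enables the per-command wait queue and blocks for the
 * firmware response; 'false' just queues the command and returns.
 */
static int example_set_pkt_filter(struct mwifiex_private *priv,
				  void *pkt_filter, bool wait)
{
	return mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
				HostCmd_ACT_GEN_SET, 0, pkt_filter, wait);
}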
@@ -785,7 +776,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
unsigned long flags;
/* Now we got response from FW, cancel the command timer */
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
@@ -794,7 +785,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
return -1;
}
- adapter->num_cmd_timeout = 0;
+ adapter->is_cmd_timedout = 0;
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
@@ -905,8 +896,7 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
struct cmd_ctrl_node *cmd_node;
struct timeval tstamp;
- adapter->num_cmd_timeout++;
- adapter->dbg.num_cmd_timeout++;
+ adapter->is_cmd_timedout = 1;
if (!adapter->curr_cmd) {
dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
return;
@@ -929,8 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
adapter->dbg.num_cmd_host_to_card_failure);
- dev_err(adapter->dev, "num_cmd_timeout = %d\n",
- adapter->dbg.num_cmd_timeout);
+ dev_err(adapter->dev, "is_cmd_timedout = %d\n",
+ adapter->is_cmd_timedout);
dev_err(adapter->dev, "num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
@@ -987,13 +977,14 @@ void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags;
+ unsigned long flags, cmd_flags;
+ struct mwifiex_private *priv;
+ int i;
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd->wait_q_enabled = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
@@ -1013,6 +1004,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
@@ -1027,9 +1019,21 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (adapter->scan_processing) {
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+ }
+ }
}
/*
@@ -1048,7 +1052,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
- bool cancel_scan_cmd = false;
+ struct mwifiex_private *priv;
+ int i;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
@@ -1074,15 +1079,24 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
spin_lock_irqsave(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
- cancel_scan_cmd = true;
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
- if (cancel_scan_cmd) {
+ if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ }
+ }
}
adapter->cmd_wait_q.status = -1;
}
@@ -1454,7 +1468,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
{
struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
struct mwifiex_adapter *adapter = priv->adapter;
- int i;
+ struct mwifiex_ie_types_header *tlv;
+ struct hw_spec_fw_api_rev *api_rev;
+ u16 resp_size, api_id;
+ int i, left_len, parsed_len = 0;
adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
@@ -1490,6 +1507,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
}
adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
+ adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
@@ -1498,8 +1516,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* Copy 11AC cap */
adapter->hw_dot_11ac_dev_cap =
le32_to_cpu(hw_spec->dot_11ac_dev_cap);
- adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
- adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+ adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
+ adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
/* Copy 11AC mcs */
adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1530,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->is_hw_11ac_capable = false;
}
+ resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
+ if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
+ /* we have variable HW SPEC information */
+ left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
+ while (left_len > sizeof(struct mwifiex_ie_types_header)) {
+ tlv = (void *)&hw_spec->tlvs + parsed_len;
+ switch (le16_to_cpu(tlv->type)) {
+ case TLV_TYPE_FW_API_REV:
+ api_rev = (struct hw_spec_fw_api_rev *)tlv;
+ api_id = le16_to_cpu(api_rev->api_id);
+ switch (api_id) {
+ case KEY_API_VER_ID:
+ adapter->fw_key_api_major_ver =
+ api_rev->major_ver;
+ adapter->fw_key_api_minor_ver =
+ api_rev->minor_ver;
+ dev_dbg(adapter->dev,
+ "fw_key_api v%d.%d\n",
+ adapter->fw_key_api_major_ver,
+ adapter->fw_key_api_minor_ver);
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown FW api_id: %d\n",
+ api_id);
+ break;
+ }
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
+ break;
+ }
+ parsed_len += le16_to_cpu(tlv->len) +
+ sizeof(struct mwifiex_ie_types_header);
+ left_len -= parsed_len;
+ }
+ }
+
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
@@ -1538,6 +1598,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
+ adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a5f9875cfd6e..b8a49aad12fd 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -85,8 +85,8 @@ static struct mwifiex_debug_data items[] = {
item_addr(hs_activated), 1},
{"num_tx_timeout", item_size(num_tx_timeout),
item_addr(num_tx_timeout), 1},
- {"num_cmd_timeout", item_size(num_cmd_timeout),
- item_addr(num_cmd_timeout), 1},
+ {"is_cmd_timedout", item_size(is_cmd_timedout),
+ item_addr(is_cmd_timedout), 1},
{"timeout_cmd_id", item_size(timeout_cmd_id),
item_addr(timeout_cmd_id), 1},
{"timeout_cmd_act", item_size(timeout_cmd_act),
@@ -493,7 +493,7 @@ mwifiex_regrdwr_write(struct file *file,
{
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *) addr;
- size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+ size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
int ret;
u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
@@ -594,7 +594,7 @@ mwifiex_rdeeprom_write(struct file *file,
{
unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *) addr;
- size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+ size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
int ret = 0;
int offset = -1, bytes = -1;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3a21bd03d6db..e7b3e16e5d34 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -75,10 +75,16 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
+#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
+#define MWIFIEX_TDLS_DISABLE_LINK 0x00
+#define MWIFIEX_TDLS_ENABLE_LINK 0x01
+#define MWIFIEX_TDLS_CREATE_LINK 0x02
+#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
MWIFIEX_BSS_ROLE_ANY = 0xff,
};
+enum mwifiex_tdls_status {
+ TDLS_NOT_SETUP = 0,
+ TDLS_SETUP_INPROGRESS,
+ TDLS_SETUP_COMPLETE,
+ TDLS_SETUP_FAILURE,
+ TDLS_LINK_TEARDOWN,
+};
+
+enum mwifiex_tdls_error_code {
+ TDLS_ERR_NO_ERROR = 0,
+ TDLS_ERR_INTERNAL_ERROR,
+ TDLS_ERR_MAX_LINKS_EST,
+ TDLS_ERR_LINK_EXISTS,
+ TDLS_ERR_LINK_NONEXISTENT,
+ TDLS_ERR_PEER_STA_UNREACHABLE = 25,
+};
+
#define BSS_ROLE_BIT_MASK BIT(0)
#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5fa932d5f905..b485dc1ae5eb 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
#define HOSTCMD_SUPPORTED_RATES 14
#define N_SUPPORTED_RATES 3
#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
- BAND_AN | BAND_GAC | BAND_AAC)
+ BAND_AN | BAND_AAC)
#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
- BIT(12) | BIT(13))
+ BIT(13))
#define IS_SUPPORT_MULTI_BANDS(adapter) \
(adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
-/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
- * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit
- * bit 14 for AAC, in order to be compatible with the band capability
- * defined in the driver after right shift of 8 bits.
+/* bit 13: 11ac BAND_AAC
+ * bit 12: reserved for lab testing, will be reused for BAND_AN
+ * bit 11: 11n BAND_GN
+ * bit 10: 11a BAND_A
+ * bit 9: 11g BAND_G
+ * bit 8: 11b BAND_B
+ * Map these bits to band capability by right shifting 8 bits.
*/
#define GET_FW_DEFAULT_BANDS(adapter) \
- (((((adapter->fw_cap_info & 0x3000) << 1) | \
- (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+ (((adapter->fw_cap_info & 0x2f00) >> 8) & \
ALL_802_11_BANDS)
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
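A brief worked example of the simplified band mapping above (illustrative; BAND_B and BAND_G do not appear in this hunk, so their values are assumptions):

/* Illustrative decoding, not part of the patch:
 * fw_cap_info = 0x2b00 -> bits 8 (11b), 9 (11g), 11 (11n), 13 (11ac) set
 * (0x2b00 & 0x2f00) >> 8 = 0x2b = BAND_B | BAND_G | BAND_GN | BAND_AAC
 *                                 (1 | 2 | 8 | 32),
 * assuming BAND_B = 1 and BAND_G = 2.
 */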
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
};
+
+#define WPA_PN_SIZE 8
+#define KEY_PARAMS_FIXED_LEN 10
+#define KEY_INDEX_MASK 0xf
+#define FW_KEY_API_VER_MAJOR_V2 2
+
#define KEY_MCAST BIT(0)
#define KEY_UNICAST BIT(1)
#define KEY_ENABLED BIT(2)
+#define KEY_DEFAULT BIT(3)
+#define KEY_TX_KEY BIT(4)
+#define KEY_RX_KEY BIT(5)
#define KEY_IGTK BIT(10)
-#define WAPI_KEY_LEN 50
+#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
#define MAX_POLL_TRIES 100
#define MAX_FIRMWARE_POLL_TRIES 100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
+#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
+#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -176,13 +192,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
IEEE80211_HT_CAP_SM_PS)
+#define MWIFIEX_DEF_11N_TX_BF_CAP 0x09E1E008
+
#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
+#define GET_RXSTBC(x) (x & IEEE80211_HT_CAP_RX_STBC)
+#define MWIFIEX_RX_STBC1 0x0100
+#define MWIFIEX_RX_STBC12 0x0200
+#define MWIFIEX_RX_STBC123 0x0300
+
/* dev_cap bitmap
* BIT
* 0-16 reserved
@@ -204,6 +228,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
+#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
/* httxcfg bitmap
* 0 reserved
@@ -216,8 +241,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
*/
#define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6))
+/* 11AC Tx and Rx MCS map for 1x1 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 7 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_1X1 0xfffefffe
+
+/* 11AC Tx and Rx MCS map for 2x2 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1 and 2
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 6 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_2X2 0xfffafffa
+
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_1X1 0x11
#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
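A short decoding of the two VHT MCS-map constants above (illustrative; it relies on the standard 802.11ac two-bit-per-stream encoding, where 2 means MCS 0-9 supported and 3 means the stream is not supported):

/* Illustrative decoding, not part of the patch:
 * 0xfffe -> ...11 11 10b : stream 1 = MCS 0-9, streams 2-8 unsupported
 * 0xfffa -> ...11 10 10b : streams 1-2 = MCS 0-9, streams 3-8 unsupported
 * Each 32-bit constant repeats the 16-bit map for the Tx and Rx halves.
 */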
@@ -226,17 +264,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
(2 * (nss - 1)))
-#define NO_NSS_SUPPORT 0x3
-
#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
+/* Clear SU Beamformer, MU beamformer, MU beamformee and
+ * sounding dimensions bits
+ */
+#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
+
#define MOD_CLASS_HR_DSSS 0x03
#define MOD_CLASS_OFDM 0x07
#define MOD_CLASS_HT 0x08
@@ -295,10 +340,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_802_11_SCAN_EXT 0x0107
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_OPER 0x0122
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -440,6 +487,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
@@ -468,6 +516,12 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define ACT_TDLS_DELETE 0x00
+#define ACT_TDLS_CREATE 0x01
+#define ACT_TDLS_CONFIG 0x02
+
+#define MWIFIEX_FW_V15 15
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -480,6 +534,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
+#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
struct txpd {
u8 bss_type;
@@ -676,6 +731,56 @@ struct mwifiex_cmac_param {
u8 key[WLAN_KEY_LEN_AES_CMAC];
} __packed;
+struct mwifiex_wep_param {
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+} __packed;
+
+struct mwifiex_tkip_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_TKIP];
+} __packed;
+
+struct mwifiex_aes_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_CCMP];
+} __packed;
+
+struct mwifiex_wapi_param {
+ u8 pn[PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_SMS4];
+} __packed;
+
+struct mwifiex_cmac_aes_param {
+ u8 ipn[IGTK_PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
+struct mwifiex_ie_type_key_param_set_v2 {
+ __le16 type;
+ __le16 len;
+ u8 mac_addr[ETH_ALEN];
+ u8 key_idx;
+ u8 key_type;
+ __le16 key_info;
+ union {
+ struct mwifiex_wep_param wep;
+ struct mwifiex_tkip_param tkip;
+ struct mwifiex_aes_param aes;
+ struct mwifiex_wapi_param wapi;
+ struct mwifiex_cmac_aes_param cmac_aes;
+ } key_params;
+} __packed;
+
+struct host_cmd_ds_802_11_key_material_v2 {
+ __le16 action;
+ struct mwifiex_ie_type_key_param_set_v2 key_param_set;
+} __packed;
+
struct host_cmd_ds_802_11_key_material {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set;
@@ -727,6 +832,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
} params;
} __packed;
+enum FW_API_VER_ID {
+ KEY_API_VER_ID = 1,
+};
+
+struct hw_spec_fw_api_rev {
+ struct mwifiex_ie_types_header header;
+ __le16 api_id;
+ u8 major_ver;
+ u8 minor_ver;
+} __packed;
+
struct host_cmd_ds_get_hw_spec {
__le16 hw_if_version;
__le16 version;
@@ -748,6 +864,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
+ u8 tlvs[0];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -993,6 +1110,7 @@ struct mwifiex_rate_scope {
__le16 hr_dsss_rate_bitmap;
__le16 ofdm_rate_bitmap;
__le16 ht_mcs_rate_bitmap[8];
+ __le16 vht_mcs_rate_bitmap[8];
} __packed;
struct mwifiex_rate_drop_pattern {
@@ -1047,14 +1165,28 @@ struct host_cmd_ds_rf_ant_siso {
__le16 ant_mode;
};
-struct mwifiex_bcn_param {
- u8 bssid[ETH_ALEN];
- u8 rssi;
+struct host_cmd_ds_tdls_oper {
+ __le16 tdls_action;
+ __le16 reason;
+ u8 peer_mac[ETH_ALEN];
+} __packed;
+
+struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
__le16 cap_info_bitmap;
} __packed;
+struct mwifiex_event_scan_result {
+ __le16 event_id;
+ u8 bss_index;
+ u8 bss_type;
+ u8 more_event;
+ u8 reserved[3];
+ __le16 buf_size;
+ u8 num_of_set;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -1124,6 +1256,28 @@ struct host_cmd_ds_802_11_scan_rsp {
u8 bss_desc_and_tlv_buffer[1];
} __packed;
+struct host_cmd_ds_802_11_scan_ext {
+ u32 reserved;
+ u8 tlv_buffer[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_rsp {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+ u8 frame_body[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 rssi;
+ __le16 anpi;
+ u8 cca_busy_fraction;
+ u8 radio_type;
+ u8 channel;
+ u8 reserved;
+ __le64 tsf;
+} __packed;
+
struct host_cmd_ds_802_11_bg_scan_query {
u8 flush;
} __packed;
@@ -1296,6 +1450,11 @@ struct mwifiex_ie_types_vhtcap {
struct ieee80211_vht_cap vht_cap;
} __packed;
+struct mwifiex_ie_types_aid {
+ struct mwifiex_ie_types_header header;
+ __le16 aid;
+} __packed;
+
struct mwifiex_ie_types_oper_mode_ntf {
struct mwifiex_ie_types_header header;
u8 oper_mode;
@@ -1331,6 +1490,11 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct mwifiex_ie_types_qos_info {
+ struct mwifiex_ie_types_header header;
+ u8 qos_info;
+} __packed;
+
struct host_cmd_ds_mac_reg_access {
__le16 action;
__le16 offset;
@@ -1441,6 +1605,11 @@ struct host_cmd_tlv_rates {
u8 rates[0];
} __packed;
+struct mwifiex_ie_types_bssid_list {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
struct host_cmd_tlv_bcast_ssid {
struct mwifiex_ie_types_header header;
u8 bcast_ctl;
@@ -1634,6 +1803,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
+ struct host_cmd_ds_802_11_scan_ext ext_scan;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1823,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_11n_cfg htcfg;
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
+ struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1842,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_oper tdls_oper;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 81ac001ee741..3bf3d58bbc02 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -138,9 +138,9 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
}
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
- return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_CUSTOM_IE_I, ie_list);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_CUSTOM_IE_I, ie_list, false);
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0a817f2bf0..4ecd0b208ac6 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -233,7 +234,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->pm_wakeup_fw_try = false;
- adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->is_hs_configured = false;
@@ -281,6 +281,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
adapter->empty_tx_q_cnt = 0;
+ adapter->ext_scan = true;
+ adapter->fw_key_api_major_ver = 0;
+ adapter->fw_key_api_minor_ver = 0;
}
/*
@@ -450,6 +453,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -615,7 +619,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
/* cancel current command */
if (adapter->curr_cmd) {
dev_warn(adapter->dev, "curr_cmd is still in processing\n");
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
adapter->curr_cmd = NULL;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 00a95f4c6a6c..ee494db54060 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -60,8 +60,7 @@ enum {
BAND_A = 4,
BAND_GN = 8,
BAND_AN = 16,
- BAND_GAC = 32,
- BAND_AAC = 64,
+ BAND_AAC = 32,
};
#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
#define BAND_CONFIG_A 0x01
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
+#define MWIFIEX_TDLS_SUPPORTED_RATES 8
+#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
+#define MWIFIEX_PRIO_BK 2
+#define MWIFIEX_PRIO_VI 5
struct mwifiex_uap_bss_param {
u8 channel;
@@ -174,6 +177,7 @@ struct mwifiex_ds_rx_reorder_tbl {
struct mwifiex_ds_tx_ba_stream_tbl {
u16 tid;
u8 ra[ETH_ALEN];
+ u8 amsdu;
};
#define DBG_CMD_NUM 5
@@ -206,7 +210,7 @@ struct mwifiex_debug_info {
u32 num_cmd_assoc_success;
u32 num_cmd_assoc_failure;
u32 num_tx_timeout;
- u32 num_cmd_timeout;
+ u8 is_cmd_timedout;
u16 timeout_cmd_id;
u16 timeout_cmd_act;
u16 last_cmd_id[DBG_CMD_NUM];
@@ -233,7 +237,10 @@ struct mwifiex_ds_encrypt_key {
u8 mac_addr[ETH_ALEN];
u32 is_wapi_key;
u8 pn[PN_LEN]; /* packet number */
+ u8 pn_len;
u8 is_igtk_key;
+ u8 is_current_wep_key;
+ u8 is_rx_seq_valid;
};
struct mwifiex_power_cfg {
@@ -432,4 +439,16 @@ struct mwifiex_ds_coalesce_cfg {
struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
};
+struct mwifiex_ds_tdls_oper {
+ u16 tdls_action;
+ u8 peer_mac[ETH_ALEN];
+ u16 capability;
+ u8 qos_info;
+ u8 *ext_capab;
+ u8 ext_capab_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *ht_capab;
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 4e4686e6ac09..89dc62a467f4 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
@@ -902,9 +901,9 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, false)) {
dev_err(adapter->dev,
"ADHOC_S_CMD: G Protection config failed\n");
return -1;
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1074,9 +1073,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
priv->
curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &curr_pkt_filter)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &curr_pkt_filter, false)) {
dev_err(priv->adapter->dev,
"ADHOC_J_CMD: G Protection config failed\n");
return -1;
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1314,8 +1312,8 @@ int mwifiex_associate(struct mwifiex_private *priv,
retrieval */
priv->assoc_rsp_size = 0;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_ASSOCIATE,
- HostCmd_ACT_GEN_SET, 0, bss_desc);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
+ HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
@@ -1335,14 +1333,13 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
- HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
+ HostCmd_ACT_GEN_SET, 0, adhoc_ssid, true);
}
/*
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1387,8 +1383,8 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
priv->curr_bss_params.band);
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
- HostCmd_ACT_GEN_SET, 0, bss_desc);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
+ HostCmd_ACT_GEN_SET, 0, bss_desc, true);
}
/*
@@ -1407,8 +1403,8 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
else
memcpy(mac_address, mac, ETH_ALEN);
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
- HostCmd_ACT_GEN_SET, 0, mac_address);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
+ HostCmd_ACT_GEN_SET, 0, mac_address, true);
return ret;
}
@@ -1436,19 +1432,31 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
- return mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_AD_HOC_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
case NL80211_IFTYPE_AP:
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
default:
break;
}
return ret;
}
-EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
+
+/* This function deauthenticates/disconnects from all BSS. */
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_private *priv;
+ int i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv)
+ mwifiex_deauthenticate(priv, NULL);
+ }
+}
+EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
/*
* This function converts band to radio type used in channel TLV.
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d3d2758ec35..77db0886c6e2 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -38,7 +38,8 @@ static void scan_delay_timer_fn(unsigned long data)
if (adapter->surprise_removed)
return;
- if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT ||
+ !adapter->scan_processing) {
/*
* Abort scan operation by cancelling all pending scan
* commands
@@ -194,7 +195,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
if (adapter->if_ops.cleanup_if)
adapter->if_ops.cleanup_if(adapter);
- del_timer(&adapter->cmd_timer);
+ del_timer_sync(&adapter->cmd_timer);
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
@@ -678,8 +679,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
if (!ret)
memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -871,7 +872,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
adapter->is_suspended = false;
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
- adapter->cmd_wait_q_required = false;
init_waitqueue_head(&adapter->cmd_wait_q.wait);
adapter->cmd_wait_q.status = 0;
adapter->scan_wait_q_woken = false;
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d8ad554ce39f..d53e1e8c9467 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -59,7 +59,7 @@ enum {
#define MWIFIEX_UPLD_SIZE (2312)
-#define MAX_EVENT_SIZE 1024
+#define MAX_EVENT_SIZE 2048
#define ARP_FILTER_MAX_BUF_SIZE 68
@@ -116,7 +116,7 @@ enum {
#define MWIFIEX_TYPE_DATA 0
#define MWIFIEX_TYPE_EVENT 3
-#define MAX_BITMAP_RATES_SIZE 10
+#define MAX_BITMAP_RATES_SIZE 18
#define MAX_CHANNEL_BAND_BG 14
#define MAX_CHANNEL_BAND_A 165
@@ -145,7 +145,6 @@ struct mwifiex_dbg {
u32 num_cmd_assoc_success;
u32 num_cmd_assoc_failure;
u32 num_tx_timeout;
- u32 num_cmd_timeout;
u16 timeout_cmd_id;
u16 timeout_cmd_act;
u16 last_cmd_id[DBG_CMD_NUM];
@@ -193,6 +192,8 @@ struct mwifiex_add_ba_param {
u32 tx_win_size;
u32 rx_win_size;
u32 timeout;
+ u8 tx_amsdu;
+ u8 rx_amsdu;
};
struct mwifiex_tx_aggr {
@@ -210,6 +211,7 @@ struct mwifiex_ra_list_tbl {
u16 ba_pkt_count;
u8 ba_packet_thr;
u16 total_pkt_count;
+ bool tdls_link;
};
struct mwifiex_tid_tbl {
@@ -262,6 +264,31 @@ struct ieee_types_generic {
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
} __packed;
+struct ieee_types_bss_co_2040 {
+ struct ieee_types_header ieee_hdr;
+ u8 bss_2040co;
+} __packed;
+
+struct ieee_types_extcap {
+ struct ieee_types_header ieee_hdr;
+ u8 ext_capab[8];
+} __packed;
+
+struct ieee_types_vht_cap {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_cap vhtcap;
+} __packed;
+
+struct ieee_types_vht_oper {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_operation vhtoper;
+} __packed;
+
+struct ieee_types_aid {
+ struct ieee_types_header ieee_hdr;
+ u16 aid;
+} __packed;
+
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
struct cfg80211_ssid ssid;
@@ -443,6 +470,7 @@ struct mwifiex_private {
u8 wpa_ie_len;
u8 wpa_is_gtk_set;
struct host_cmd_ds_802_11_key_material aes_key;
+ struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
u8 wapi_ie[256];
u8 wapi_ie_len;
u8 *wps_ie;
@@ -461,6 +489,7 @@ struct mwifiex_private {
struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
struct mwifiex_add_ba_param add_ba_param;
u16 rx_seq[MAX_NUM_TID];
+ u8 tos_to_tid_inv[MAX_NUM_TID];
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +547,8 @@ struct mwifiex_private {
unsigned long csa_expire_time;
u8 del_list_idx;
bool hs2_enabled;
+ struct station_parameters *sta_params;
+ struct sk_buff_head tdls_txq;
};
enum mwifiex_ba_status {
@@ -531,6 +562,7 @@ struct mwifiex_tx_ba_stream_tbl {
int tid;
u8 ra[ETH_ALEN];
enum mwifiex_ba_status ba_status;
+ u8 amsdu;
};
struct mwifiex_rx_reorder_tbl;
@@ -545,10 +577,12 @@ struct mwifiex_rx_reorder_tbl {
struct list_head list;
int tid;
u8 ta[ETH_ALEN];
+ int init_win;
int start_win;
int win_size;
void **rx_reorder_ptr;
struct reorder_tmr_cnxt timer_context;
+ u8 amsdu;
u8 flags;
};
@@ -583,17 +617,35 @@ struct mwifiex_bss_priv {
u64 fw_tsf;
};
-/* This is AP specific structure which stores information
- * about associated STA
+struct mwifiex_tdls_capab {
+ __le16 capab;
+ u8 rates[32];
+ u8 rates_len;
+ u8 qos_info;
+ u8 coex_2040;
+ u16 aid;
+ struct ieee80211_ht_cap ht_capb;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee_types_extcap extcap;
+ struct ieee_types_generic rsn_ie;
+ struct ieee80211_vht_cap vhtcap;
+ struct ieee80211_vht_operation vhtoper;
+};
+
+/* This is AP/TDLS specific structure which stores information
+ * about associated/peer STA
*/
struct mwifiex_sta_node {
struct list_head list;
u8 mac_addr[ETH_ALEN];
u8 is_wmm_enabled;
u8 is_11n_enabled;
+ u8 is_11ac_enabled;
u8 ampdu_sta[MAX_NUM_TID];
u16 rx_seq[MAX_NUM_TID];
u16 max_amsdu;
+ u8 tdls_status;
+ struct mwifiex_tdls_capab tdls_cap;
};
struct mwifiex_if_ops {
@@ -671,7 +723,7 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u32 num_cmd_timeout;
+ u8 is_cmd_timedout;
u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
@@ -722,15 +774,16 @@ struct mwifiex_adapter {
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
bool is_suspended;
+ bool hs_enabling;
u8 event_body[MAX_EVENT_SIZE];
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
+ u8 user_dev_mcs_support;
u8 adhoc_11n_enabled;
u8 sec_chan_offset;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
- u16 cmd_wait_q_required;
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
spinlock_t queue_lock; /* lock for tx queues */
@@ -753,6 +806,9 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
struct semaphore *card_sem;
+ bool ext_scan;
+ u8 fw_api_ver;
+ u8 fw_key_api_major_ver, fw_key_api_minor_ver;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -788,11 +844,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter);
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf);
-
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
- u16 cmd_action, u32 cmd_oid, void *data_buf);
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync);
void mwifiex_cmd_timeout_func(unsigned long function_context);
@@ -880,6 +933,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
int mwifiex_adhoc_start(struct mwifiex_private *priv,
struct cfg80211_ssid *adhoc_ssid);
int mwifiex_adhoc_join(struct mwifiex_private *priv,
@@ -938,6 +992,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params);
void mwifiex_set_ba_params(struct mwifiex_private *priv);
void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf);
/*
* This function checks if the queuing is RA based or not.
@@ -1078,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
const u8 *key, int key_len, u8 key_index,
const u8 *mac_addr, int disable);
-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
+int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
int mwifiex_get_ver_ext(struct mwifiex_private *priv);
@@ -1159,6 +1219,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node);
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
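The prototype change above folds mwifiex_send_cmd_async() and mwifiex_send_cmd_sync() into a single mwifiex_send_cmd() that takes a sync flag. A minimal usage sketch against the new prototype follows; the command IDs, actions and function names are illustrative only and are not taken from this hunk.

/* Illustrative sketch, not part of the patch: choosing blocking vs.
 * fire-and-forget behaviour with the unified helper.
 */
static int example_query_fw_version(struct mwifiex_private *priv)
{
	/* sync == true: block until the command response is processed */
	return mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
				HostCmd_ACT_GEN_GET, 0, NULL, true);
}

static int example_queue_scan(struct mwifiex_private *priv, void *scan_cfg)
{
	/* sync == false: queue the command and return immediately */
	return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SCAN,
				HostCmd_ACT_GEN_SET, 0, scan_cfg, false);
}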
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 7fe7b53fb17a..a7e8b96b2d90 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
- int size, int flags)
+ size_t size, int flags)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
+ struct mwifiex_dma_mapping mapping;
- buf_pa = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, buf_pa)) {
+ mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
+ if (pci_dma_mapping_error(card->dev, mapping.addr)) {
dev_err(adapter->dev, "failed to map pci memory!\n");
return -1;
}
- memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+ mapping.len = size;
+ memcpy(skb->cb, &mapping, sizeof(mapping));
return 0;
}
+static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, int flags)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+ pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+}
+
/*
* This function reads sleep cookie and checks if FW is ready
*/
@@ -109,6 +120,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
return 0;
}
@@ -179,6 +191,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.firmware = data->firmware;
card->pcie.reg = data->reg;
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+ card->pcie.tx_buf_size = data->tx_buf_size;
}
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -199,7 +212,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- int i;
card = pci_get_drvdata(pdev);
if (!card)
@@ -218,11 +230,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
mwifiex_pcie_resume(&pdev->dev);
#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+ mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -320,6 +328,30 @@ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
return;
}
+static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
+ u32 max_delay_loop_cnt)
+{
+ struct pcie_service_card *card = adapter->card;
+ u8 *buffer;
+ u32 sleep_cookie, count;
+
+ for (count = 0; count < max_delay_loop_cnt; count++) {
+ buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
+ sleep_cookie = *(u32 *)buffer;
+
+ if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
+ dev_dbg(adapter->dev,
+ "sleep cookie found at count %d\n", count);
+ break;
+ }
+ usleep_range(20, 30);
+ }
+
+ if (count >= max_delay_loop_cnt)
+ dev_dbg(adapter->dev,
+ "max count reached while accessing sleep cookie\n");
+}
+
/* This function wakes up the card by reading fw_status register. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
@@ -456,7 +488,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +545,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +581,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc2 = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +590,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -587,8 +619,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc2 = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +628,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -622,8 +654,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
desc = card->evtbd_ring[i];
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -861,7 +893,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card;
- dma_addr_t buf_pa;
if (!adapter)
return 0;
@@ -869,16 +900,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
}
if (card && card->cmd_buf) {
- MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
}
return 0;
}
@@ -956,7 +985,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
struct sk_buff *skb;
- dma_addr_t buf_pa;
u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
struct mwifiex_pcie_buf_desc *desc;
struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +1014,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
reg->tx_start_ptr;
skb = card->tx_buf_list[wrdoneidx];
+
if (skb) {
dev_dbg(adapter->dev,
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
unmap_count++;
@@ -1006,7 +1034,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
card->tx_buf_list[wrdoneidx] = NULL;
if (reg->pfu_enabled) {
- desc2 = (void *)card->txbd_ring[wrdoneidx];
+ desc2 = card->txbd_ring[wrdoneidx];
memset(desc2, 0, sizeof(*desc2));
} else {
desc = card->txbd_ring[wrdoneidx];
@@ -1082,16 +1110,16 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
tmp = (__le16 *)&payload[2];
*tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len ,
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
card->tx_buf_list[wrindx] = skb;
if (reg->pfu_enabled) {
- desc2 = (void *)card->txbd_ring[wrindx];
+ desc2 = card->txbd_ring[wrindx];
desc2->paddr = buf_pa;
desc2->len = (u16)skb->len;
desc2->frag_len = (u16)skb->len;
@@ -1162,8 +1190,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
if (reg->pfu_enabled)
memset(desc2, 0, sizeof(*desc2));
@@ -1217,9 +1244,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- MWIFIEX_SKB_PACB(skb_data, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1246,7 +1271,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
dev_dbg(adapter->dev,
"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1254,7 +1279,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
card->rx_buf_list[rd_index] = skb_tmp;
if (reg->pfu_enabled) {
- desc2 = (void *)card->rxbd_ring[rd_index];
+ desc2 = card->rxbd_ring[rd_index];
desc2->paddr = buf_pa;
desc2->len = skb_tmp->len;
desc2->frag_len = skb_tmp->len;
@@ -1322,7 +1347,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
/* Write the lower 32bits of the physical address to low command
* address scratch register
@@ -1331,8 +1356,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1344,8 +1368,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1354,8 +1377,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1364,8 +1386,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
dev_err(adapter->dev,
"%s: failed to assert door-bell intr\n", __func__);
- pci_unmap_single(card->dev, buf_pa,
- MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1439,7 +1460,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
+ cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1460,7 +1481,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
}
- MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+ cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
@@ -1514,13 +1535,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
int count = 0;
u16 rx_len;
__le16 pkt_len;
- dma_addr_t buf_pa;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+
+ /* Unmap the command as a response has been received. */
+ if (card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
+ card->cmd_buf = NULL;
+ }
pkt_len = *((__le16 *)skb->data);
rx_len = le16_to_cpu(pkt_len);
@@ -1539,6 +1564,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"Write register failed\n");
return -1;
}
+ mwifiex_delay_for_sleep_cookie(adapter,
+ MWIFIEX_MAX_DELAY_COUNT);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1552,8 +1579,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
@@ -1588,8 +1613,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
- struct sk_buff *skb_tmp;
if (skb) {
card->cmdrsp_buf = skb;
@@ -1599,14 +1622,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
return -1;
}
- skb_tmp = card->cmd_buf;
- if (skb_tmp) {
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
- PCI_DMA_FROMDEVICE);
- card->cmd_buf = NULL;
- }
-
return 0;
}
@@ -1619,7 +1634,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1655,9 +1669,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1703,7 +1715,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!skb)
@@ -1728,11 +1739,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
desc = card->evtbd_ring[rdptr];
- desc->paddr = buf_pa;
+ desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
desc->len = (u16)skb->len;
desc->flags = 0;
skb = NULL;
@@ -1782,7 +1791,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct sk_buff *skb;
u32 txlen, tx_blocks = 0, tries, len;
u32 block_retry_cnt = 0;
- dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -1880,8 +1888,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
- MWIFIEX_SKB_PACB(skb, &buf_pa);
-
/* Wait for the command done interrupt */
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1889,16 +1895,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "%s: Failed to read "
"interrupt status during fw dnld.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
offset += txlen;
} while (true);
@@ -2338,6 +2343,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
}
adapter->dev = &pdev->dev;
+ adapter->tx_buf_size = card->pcie.tx_buf_size;
strcpy(adapter->fw_name, card->pcie.firmware);
return 0;
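The map/unmap helpers above stop passing raw dma_addr_t values around: mwifiex_map_pci_memory() stores an address/length pair in skb->cb and mwifiex_unmap_pci_memory() reads it back, so callers no longer track buffer sizes themselves. A minimal sketch of the cb bookkeeping this relies on; the struct layout and accessor shapes are assumptions modelled on how MWIFIEX_SKB_PACB()/MWIFIEX_SKB_DMA_ADDR() are used in the hunks, not copied from the driver headers.

/* Illustrative sketch, not part of the patch: assumed shape of the
 * skb->cb mapping record and the accessors the hunks above rely on.
 */
struct mwifiex_dma_mapping {
	dma_addr_t addr;
	size_t len;
};

static inline void example_skb_get_mapping(struct sk_buff *skb,
					   struct mwifiex_dma_mapping *m)
{
	/* mirrors MWIFIEX_SKB_PACB(): copy the record out of skb->cb */
	memcpy(m, skb->cb, sizeof(*m));
}

static inline dma_addr_t example_skb_dma_addr(struct sk_buff *skb)
{
	struct mwifiex_dma_mapping m;

	/* mirrors MWIFIEX_SKB_DMA_ADDR(): only the address is needed */
	example_skb_get_mapping(skb, &m);
	return m.addr;
}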
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index d322ab8604ea..e8ec561f8a64 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -97,6 +97,8 @@
#define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD 256
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
+#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
+#define MWIFIEX_MAX_DELAY_COUNT 5
struct mwifiex_pcie_card_reg {
u16 cmd_addr_lo;
@@ -195,18 +197,21 @@ struct mwifiex_pcie_device {
const char *firmware;
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
+ u16 tx_buf_size;
};
static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
.firmware = PCIE8766_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8766,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.firmware = PCIE8897_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8897,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
};
struct mwifiex_evt_buf_desc {
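Each supported chip now carries its own TX buffer size in the device table; mwifiex_pcie_probe() copies it into the card and mwifiex_register_dev() hands it to the adapter, as the pcie.c hunks show. Below is a hypothetical table entry illustrating where the new field sits; the firmware path is a placeholder, while the other values reuse constants from this header.

/* Illustrative sketch, not part of the patch: a made-up device entry
 * showing the new tx_buf_size field.  The firmware name is a
 * placeholder; the remaining values come from the definitions above.
 */
static const struct mwifiex_pcie_device example_pcie_device = {
	.firmware	= "mrvl/example_pcie_fw.bin",	/* placeholder */
	.reg		= &mwifiex_reg_8766,
	.blksz_fw_dl	= MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
	.tx_buf_size	= MWIFIEX_TX_DATA_BUF_SIZE_2K,
};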
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 668547c2de84..7b3af3d29ded 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -591,11 +591,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
*chan_tlv_out,
struct mwifiex_chan_scan_param_set *scan_chan_list)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
-
- u32 tlv_idx, rates_size;
+ struct cmd_ctrl_node *cmd_node, *tmp_node;
+ unsigned long flags;
+ u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
u8 radio_type;
@@ -733,9 +735,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Send the scan command to the firmware with the specified
cfg */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
- HostCmd_ACT_GEN_SET, 0,
- scan_cfg_out);
+ if (priv->adapter->ext_scan)
+ cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
+ else
+ cmd_no = HostCmd_CMD_802_11_SCAN;
+
+ ret = mwifiex_send_cmd(priv, cmd_no, HostCmd_ACT_GEN_SET,
+ 0, scan_cfg_out, false);
/* rate IE is updated per scan command but same starting
* pointer is used each time so that rate IE from earlier
@@ -744,8 +750,19 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
scan_cfg_out->tlv_buf_len -=
sizeof(struct mwifiex_ie_types_header) + rates_size;
- if (ret)
+ if (ret) {
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q,
+ list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
break;
+ }
}
if (ret)
@@ -786,6 +803,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+ struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -848,6 +866,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
+ if (adapter->ext_scan &&
+ !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+ bssid_tlv =
+ (struct mwifiex_ie_types_bssid_list *)tlv_pos;
+ bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
+ bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
+ ETH_ALEN);
+ tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
+ }
+
for (i = 0; i < user_scan_in->num_ssids; i++) {
ssid_len = user_scan_in->ssid_list[i].ssid_len;
@@ -941,7 +970,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type =
mwifiex_band_to_radio_type(priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
}
@@ -1576,6 +1605,228 @@ done:
return 0;
}
+static int
+mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
+ bool ext_scan, s32 rssi_val)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_chan_freq_power *cfp;
+ struct cfg80211_bss *bss;
+ u8 bssid[ETH_ALEN];
+ s32 rssi;
+ const u8 *ie_buf;
+ size_t ie_len;
+ u16 channel = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+ u16 beacon_period;
+ u16 cap_info_bitmap;
+ u8 *current_ptr;
+ u64 timestamp;
+ struct mwifiex_fixed_bcn_param *bcn_param;
+ struct mwifiex_bss_priv *bss_priv;
+
+ if (*bytes_left >= sizeof(beacon_size)) {
+ /* Extract & convert beacon size from command buffer */
+ memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ *bytes_left -= sizeof(beacon_size);
+ *bss_info += sizeof(beacon_size);
+ }
+
+ if (!beacon_size || beacon_size > *bytes_left) {
+ *bss_info += *bytes_left;
+ *bytes_left = 0;
+ return -EFAULT;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+ * iteration
+ */
+ current_ptr = *bss_info;
+
+ /* Advance the return beacon pointer past the current beacon */
+ *bss_info += beacon_size;
+ *bytes_left -= beacon_size;
+
+ curr_bcn_bytes = beacon_size;
+
+ /* First 5 fields are bssid, RSSI (for legacy scan only),
+ * time stamp, beacon interval, and capability information
+ */
+ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
+ sizeof(struct mwifiex_fixed_bcn_param)) {
+ dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ return -EFAULT;
+ }
+
+ memcpy(bssid, current_ptr, ETH_ALEN);
+ current_ptr += ETH_ALEN;
+ curr_bcn_bytes -= ETH_ALEN;
+
+ if (!ext_scan) {
+ rssi = (s32) *current_ptr;
+ rssi = (-rssi) * 100; /* Convert dBm to mBm */
+ current_ptr += sizeof(u8);
+ curr_bcn_bytes -= sizeof(u8);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ } else {
+ rssi = rssi_val;
+ }
+
+ bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
+ current_ptr += sizeof(*bcn_param);
+ curr_bcn_bytes -= sizeof(*bcn_param);
+
+ timestamp = le64_to_cpu(bcn_param->timestamp);
+ beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+ cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+ dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
+
+ /* Rest of the current buffer are IE's */
+ ie_buf = current_ptr;
+ ie_len = curr_bcn_bytes;
+ dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
+
+ while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+ u8 element_id, element_len;
+
+ element_id = *current_ptr;
+ element_len = *(current_ptr + 1);
+ if (curr_bcn_bytes < element_len +
+ sizeof(struct ieee_types_header)) {
+ dev_err(adapter->dev,
+ "%s: bytes left < IE length\n", __func__);
+ return -EFAULT;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(current_ptr +
+ sizeof(struct ieee_types_header));
+ break;
+ }
+
+ current_ptr += element_len + sizeof(struct ieee_types_header);
+ curr_bcn_bytes -= element_len +
+ sizeof(struct ieee_types_header);
+ }
+
+ if (channel) {
+ struct ieee80211_channel *chan;
+ u8 band;
+
+ /* Skip entry if on csa closed channel */
+ if (channel == priv->csa_chan) {
+ dev_dbg(adapter->dev,
+ "Dropping entry on csa closed channel\n");
+ return 0;
+ }
+
+ band = BAND_G;
+ if (radio_type)
+ band = mwifiex_radio_type_to_band(*radio_type &
+ (BIT(0) | BIT(1)));
+
+ cfp = mwifiex_get_cfp(priv, band, channel, 0);
+
+ freq = cfp ? cfp->freq : 0;
+
+ chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+
+ if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ chan, bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+ bss_priv->fw_tsf = fw_tsf;
+ if (priv->media_connected &&
+ !memcmp(bssid, priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params(priv, bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
+ }
+ } else {
+ dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ }
+
+ return 0;
+}
+
+static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ /* Need to indicate IOCTL complete */
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev,
+ "complete internal scan\n");
+ mwifiex_complete_cmd(adapter,
+ adapter->curr_cmd);
+ }
+ }
+ if (priv->report_scan_result)
+ priv->report_scan_result = false;
+
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
+ }
+ } else {
+ if ((priv->scan_aborting && !priv->scan_request) ||
+ priv->scan_block) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+ mod_timer(&priv->scan_delay_timer, jiffies);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: triggerring scan abort\n", __func__);
+ } else if (!mwifiex_wmm_lists_empty(adapter) &&
+ (priv->scan_request && (priv->scan_request->flags &
+ NL80211_SCAN_FLAG_LOW_PRIORITY))) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ dev_dbg(priv->adapter->dev,
+ "info: %s: deferring scan\n", __func__);
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ * cmd_pending_q
+ */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
+ }
+
+ return;
+}
+
/*
* This function handles the command response of scan.
*
@@ -1600,7 +1851,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1859,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
u32 bytes_left;
u32 idx;
u32 tlv_buf_size;
- struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
u8 is_bgscan_resp;
- unsigned long flags;
- struct cfg80211_bss *bss;
+ __le64 fw_tsf = 0;
+ u8 *radio_type;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1925,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
&chan_band_tlv);
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
- u8 bssid[ETH_ALEN];
- s32 rssi;
- const u8 *ie_buf;
- size_t ie_len;
- u16 channel = 0;
- __le64 fw_tsf = 0;
- u16 beacon_size = 0;
- u32 curr_bcn_bytes;
- u32 freq;
- u16 beacon_period;
- u16 cap_info_bitmap;
- u8 *current_ptr;
- u64 timestamp;
- struct mwifiex_bcn_param *bcn_param;
- struct mwifiex_bss_priv *bss_priv;
-
- if (bytes_left >= sizeof(beacon_size)) {
- /* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, bss_info, sizeof(beacon_size));
- bytes_left -= sizeof(beacon_size);
- bss_info += sizeof(beacon_size);
- }
+ /*
+ * If the TSF TLV was appended to the scan results, save this
+ * entry's TSF value in the fw_tsf field. It is the firmware's
+ * TSF value at the time the beacon or probe response was
+ * received.
+ */
+ if (tsf_tlv)
+ memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(fw_tsf));
- if (!beacon_size || beacon_size > bytes_left) {
- bss_info += bytes_left;
- bytes_left = 0;
- ret = -1;
- goto check_next_scan;
+ if (chan_band_tlv) {
+ chan_band = &chan_band_tlv->chan_band_param[idx];
+ radio_type = &chan_band->radio_type;
+ } else {
+ radio_type = NULL;
}
- /* Initialize the current working beacon pointer for this BSS
- * iteration */
- current_ptr = bss_info;
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left,
+ le64_to_cpu(fw_tsf),
+ radio_type, false, 0);
+ if (ret)
+ goto check_next_scan;
+ }
- /* Advance the return beacon pointer past the current beacon */
- bss_info += beacon_size;
- bytes_left -= beacon_size;
+check_next_scan:
+ mwifiex_check_next_scan_command(priv);
+ return ret;
+}
- curr_bcn_bytes = beacon_size;
+/*
+ * This function prepares an extended scan command to be sent to the firmware.
+ *
+ * It uses the scan command configuration sent to the command processing
+ * module in the command preparation stage to configure an extended scan
+ * command structure to send to the firmware.
+ */
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
+ struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
- /*
- * First 5 fields are bssid, RSSI, time stamp, beacon interval,
- * and capability information
- */
- if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
- dev_err(adapter->dev,
- "InterpretIE: not enough bytes left\n");
- continue;
- }
- bcn_param = (struct mwifiex_bcn_param *)current_ptr;
- current_ptr += sizeof(*bcn_param);
- curr_bcn_bytes -= sizeof(*bcn_param);
+ memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
- memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
- rssi = (s32) bcn_param->rssi;
- rssi = (-rssi) * 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
+ cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
+ + scan_cfg->tlv_buf_len + S_DS_GEN));
- timestamp = le64_to_cpu(bcn_param->timestamp);
- beacon_period = le16_to_cpu(bcn_param->beacon_period);
+ return 0;
+}
- cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+/* This function handles the command response of extended scan */
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+{
+ dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ return 0;
+}
- /* Rest of the current buffer are IE's */
- ie_buf = current_ptr;
- ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev,
- "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+/* This function handles the extended scan report event. It parses the
+ * extended scan results and reports them to the cfg80211 stack.
+ */
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *bss_info;
+ u32 bytes_left, bytes_left_for_tlv, idx;
+ u16 type, len;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
+ struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
+ u8 *radio_type;
+ u64 fw_tsf = 0;
+ s32 rssi = 0;
+ struct mwifiex_event_scan_result *event_scan = buf;
+ u8 num_of_set = event_scan->num_of_set;
+ u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
+ u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
+
+ if (num_of_set > MWIFIEX_MAX_AP) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
+ ret = -1;
+ goto check_next_scan;
+ }
- while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
- u8 element_id, element_len;
+ bytes_left = scan_resp_size;
+ dev_dbg(adapter->dev,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
- element_id = *current_ptr;
- element_len = *(current_ptr + 1);
- if (curr_bcn_bytes < element_len +
- sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev,
- "%s: bytes left < IE length\n",
- __func__);
- goto check_next_scan;
- }
- if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(current_ptr + sizeof(struct ieee_types_header));
- break;
- }
+ tlv = (struct mwifiex_ie_types_data *)scan_resp;
- current_ptr += element_len +
- sizeof(struct ieee_types_header);
- curr_bcn_bytes -= element_len +
- sizeof(struct ieee_types_header);
+ for (idx = 0; idx < num_of_set && bytes_left; idx++) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ break;
}
+ scan_rsp_tlv = NULL;
+ scan_info_tlv = NULL;
+ bytes_left_for_tlv = bytes_left;
- /*
- * If the TSF TLV was appended to the scan results, save this
- * entry's TSF value in the fw_tsf field. It is the firmware's
- * TSF value at the time the beacon or probe response was
- * received.
+ /* BSS response TLV with beacon or probe response buffer
+ * at the initial position of each descriptor
*/
- if (tsf_tlv)
- memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
- sizeof(fw_tsf));
-
- if (channel) {
- struct ieee80211_channel *chan;
- u8 band;
+ if (type != TLV_TYPE_BSS_SCAN_RSP)
+ break;
- /* Skip entry if on csa closed channel */
- if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ bss_info = (u8 *)tlv;
+ scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+
+ while (bytes_left_for_tlv >=
+ sizeof(struct mwifiex_ie_types_header) &&
+ le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left_for_tlv <
+ sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ scan_rsp_tlv = NULL;
+ bytes_left_for_tlv = 0;
continue;
}
-
- band = BAND_G;
- if (chan_band_tlv) {
- chan_band =
- &chan_band_tlv->chan_band_param[idx];
- band = mwifiex_radio_type_to_band(
- chan_band->radio_type
- & (BIT(0) | BIT(1)));
- }
-
- cfp = mwifiex_get_cfp(priv, band, channel, 0);
-
- freq = cfp ? cfp->freq : 0;
-
- chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
-
- if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
- bss = cfg80211_inform_bss(priv->wdev->wiphy,
- chan, bssid, timestamp,
- cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
- bss_priv = (struct mwifiex_bss_priv *)bss->priv;
- bss_priv->band = band;
- bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
- if (priv->media_connected &&
- !memcmp(bssid,
- priv->curr_bss_params.bss_descriptor
- .mac_address, ETH_ALEN))
- mwifiex_update_curr_bss_params(priv,
- bss);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ switch (type) {
+ case TLV_TYPE_BSS_SCAN_INFO:
+ scan_info_tlv =
+ (struct mwifiex_ie_types_bss_scan_info *)tlv;
+ if (len !=
+ sizeof(struct mwifiex_ie_types_bss_scan_info) -
+ sizeof(struct mwifiex_ie_types_header)) {
+ bytes_left_for_tlv = 0;
+ continue;
+ }
+ break;
+ default:
+ break;
}
- } else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
}
- }
-check_next_scan:
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (!scan_rsp_tlv)
+ break;
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
- if (priv->report_scan_result)
- priv->report_scan_result = false;
+ /* Advance pointer to the beacon buffer length and
+ * update the bytes count so that the function
+ * wlan_interpret_bss_desc_with_ie() can handle the
+ * scan buffer without any change
+ */
+ bss_info += sizeof(u16);
+ bytes_left -= sizeof(u16);
- if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
- }
- } else {
- if ((priv->scan_aborting && !priv->scan_request) ||
- priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
- mod_timer(&priv->scan_delay_timer, jiffies);
- dev_dbg(priv->adapter->dev,
- "info: %s: triggerring scan abort\n", __func__);
- } else if (!mwifiex_wmm_lists_empty(adapter) &&
- (priv->scan_request && (priv->scan_request->flags &
- NL80211_SCAN_FLAG_LOW_PRIORITY))) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = 1;
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- dev_dbg(priv->adapter->dev,
- "info: %s: deferring scan\n", __func__);
+ if (scan_info_tlv) {
+ rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
+ rssi *= 100; /* Convert dBm to mBm */
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: RSSI=%d\n", rssi);
+ fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
+ radio_type = &scan_info_tlv->radio_type;
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
- true);
+ radio_type = NULL;
}
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left, fw_tsf,
+ radio_type, true, rssi);
+ if (ret)
+ goto check_next_scan;
}
+check_next_scan:
+ if (!event_scan->more_event)
+ mwifiex_check_next_scan_command(priv);
+
return ret;
}
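The extended scan report handler above walks a buffer of TLVs, pairing each TLV_TYPE_BSS_SCAN_RSP descriptor with an optional TLV_TYPE_BSS_SCAN_INFO block before handing the beacon data to mwifiex_parse_single_response_buf(). A simplified sketch of the underlying TLV iteration, with the bounds checks reduced to the essentials; the function name is illustrative and the header layout follows mwifiex_ie_types_header as used in the hunk.

/* Illustrative sketch, not part of the patch: the basic TLV walk the
 * extended scan parser builds on (le16 type, le16 len, then payload).
 */
static void example_walk_tlvs(struct mwifiex_adapter *adapter,
			      u8 *buf, u32 bytes_left)
{
	struct mwifiex_ie_types_data *tlv = (void *)buf;

	while (bytes_left >= sizeof(struct mwifiex_ie_types_header)) {
		u16 type = le16_to_cpu(tlv->header.type);
		u16 len = le16_to_cpu(tlv->header.len);

		if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len)
			break;		/* truncated TLV, stop parsing */

		dev_dbg(adapter->dev, "TLV type %#x len %u\n", type, len);

		bytes_left -= sizeof(struct mwifiex_ie_types_header) + len;
		tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
	}
}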
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index b44a31523461..d206f04d4994 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -84,6 +84,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
+ card->tx_buf_size = data->tx_buf_size;
}
sdio_claim_host(func);
@@ -165,7 +166,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
struct sdio_mmc_card *card;
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- int i;
pr_debug("info: SDIO func num=%d\n", func->num);
@@ -184,11 +184,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+ mwifiex_deauthenticate_all(adapter);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_disable_auto_ds(priv);
@@ -241,6 +237,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
dev_err(adapter->dev, "cmd: failed to suspend\n");
+ adapter->hs_enabling = false;
return -EFAULT;
}
@@ -249,6 +246,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Indicate device suspended */
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
return ret;
}
@@ -1760,6 +1758,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
/* save adapter pointer in card */
card->adapter = adapter;
+ adapter->tx_buf_size = card->tx_buf_size;
sdio_claim_host(func);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 532ae0ac4dfb..c71201b2e2a3 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -233,6 +233,7 @@ struct sdio_mmc_card {
u8 mp_agg_pkt_limit;
bool supports_sdio_new_mode;
bool has_control_mask;
+ u16 tx_buf_size;
u32 mp_rd_bitmap;
u32 mp_wr_bitmap;
@@ -256,6 +257,7 @@ struct mwifiex_sdio_device {
u8 mp_agg_pkt_limit;
bool supports_sdio_new_mode;
bool has_control_mask;
+ u16 tx_buf_size;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -312,6 +314,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -321,6 +324,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -330,6 +334,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.mp_agg_pkt_limit = 8,
.supports_sdio_new_mode = false,
.has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -339,6 +344,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.mp_agg_pkt_limit = 16,
.supports_sdio_new_mode = true,
.has_control_mask = false,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 9208a8816b80..e3cac1495cc7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -185,6 +185,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(pbitmap_rates[2 + i]);
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0;
+ i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+ i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(pbitmap_rates[10 + i]);
+ }
} else {
rate_scope->hr_dsss_rate_bitmap =
cpu_to_le16(priv->bitmap_rates[0]);
@@ -195,6 +202,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
i++)
rate_scope->ht_mcs_rate_bitmap[i] =
cpu_to_le16(priv->bitmap_rates[2 + i]);
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0;
+ i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+ i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(priv->bitmap_rates[10 + i]);
+ }
}
rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
@@ -532,8 +546,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
return 0;
}
+/* This function populates the key material v2 command
+ * to set a network key for AES & CMAC AES.
+ */
+static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_encrypt_key *enc_key,
+ struct host_cmd_ds_802_11_key_material_v2 *km)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u16 size, len = KEY_PARAMS_FIXED_LEN;
+
+ if (enc_key->is_igtk_key) {
+ dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.cmac_aes.ipn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
+ km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
+ km->key_param_set.key_params.cmac_aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.cmac_aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_cmac_aes_param);
+ } else {
+ dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.aes.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES;
+ km->key_param_set.key_params.aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_aes_param);
+ }
+
+ km->key_param_set.len = cpu_to_le16(len);
+ size = len + sizeof(struct mwifiex_ie_types_header) +
+ sizeof(km->action) + S_DS_GEN;
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
+
+/* This function prepares the command to set/get/reset network key(s).
+ * It builds the key material command in the V2 format.
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Setting WEP keys, WAPI keys or WPA keys along with required
+ * encryption (TKIP, AES) (as required)
+ * - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *mac = enc_key->mac_addr;
+ u16 key_info, len = KEY_PARAMS_FIXED_LEN;
+ struct host_cmd_ds_802_11_key_material_v2 *km =
+ &cmd->params.key_material_v2;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
+ km->action = cpu_to_le16(cmd_action);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET) {
+ dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ km->key_param_set.key_idx =
+ enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+ key_info = KEY_UNICAST;
+ else
+ key_info = KEY_MCAST;
+
+ if (enc_key->is_igtk_key)
+ key_info |= KEY_IGTK;
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ memset(&km->key_param_set, 0,
+ sizeof(struct mwifiex_ie_type_key_param_set_v2));
+
+ if (enc_key->key_disable) {
+ dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ key_info = KEY_MCAST | KEY_UNICAST;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ key_info = KEY_ENABLED;
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
+ dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ len += sizeof(struct mwifiex_wep_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ km->key_param_set.key_type = KEY_TYPE_ID_WEP;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ } else {
+ if (enc_key->is_current_wep_key) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ if (km->key_param_set.key_idx ==
+ (priv->wep_key_curr_index & KEY_INDEX_MASK))
+ key_info |= KEY_DEFAULT;
+ } else {
+ if (mac) {
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST;
+ else
+ key_info |= KEY_UNICAST |
+ KEY_DEFAULT;
+ } else {
+ key_info |= KEY_MCAST;
+ }
+ }
+ }
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ km->key_param_set.key_params.wep.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wep.key,
+ enc_key->key_material, enc_key->key_len);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST | KEY_RX_KEY;
+ else
+ key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
+
+ if (enc_key->is_wapi_key) {
+ dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
+ memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
+ PN_LEN);
+ km->key_param_set.key_params.wapi.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wapi.key,
+ enc_key->key_material, enc_key->key_len);
+ if (is_broadcast_ether_addr(mac))
+ priv->sec_info.wapi_key_on = true;
+
+ if (!priv->sec_info.wapi_key_on)
+ key_info |= KEY_DEFAULT;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ len += sizeof(struct mwifiex_wapi_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ key_info |= KEY_DEFAULT;
+ /* Enable unicast bit for WPA-NONE/ADHOC_AES */
+ if (!priv->sec_info.wpa2_enabled &&
+ !is_broadcast_ether_addr(mac))
+ key_info |= KEY_UNICAST;
+ } else {
+ /* Enable default key for WPA/WPA2 */
+ if (!priv->wpa_is_gtk_set)
+ key_info |= KEY_DEFAULT;
+ }
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
+ return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
+ dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.tkip.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
+ km->key_param_set.key_params.tkip.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.tkip.key,
+ enc_key->key_material, enc_key->key_len);
+
+ len += sizeof(struct mwifiex_tkip_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ }
+
+ return 0;
+}
+
/*
* This function prepares command to set/get/reset network key(s).
+ * It builds the key material command in the V1 format.
*
* Preparation includes -
* - Setting command ID, action and proper size
@@ -542,10 +776,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int
-mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u32 cmd_oid,
- struct mwifiex_ds_encrypt_key *enc_key)
+mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
@@ -724,6 +958,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
return ret;
}
+/* Wrapper function for setting the network key depending on the FW KEY API version */
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+
+ else
+ return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+}
+
/*
* This function prepares command to set/get 11d domain information.
*
@@ -1173,9 +1425,9 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
/* property header is 6 bytes, data must fit in cmd buffer */
if (prop && prop->value && prop->length > 6 &&
prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
- HostCmd_ACT_GEN_SET, 0,
- prop);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0,
+ prop, true);
if (ret)
return ret;
}
@@ -1280,6 +1532,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
+ struct mwifiex_ds_tdls_oper *oper = data_buf;
+ struct mwifiex_sta_node *sta_ptr;
+ struct host_cmd_tlv_rates *tlv_rates;
+ struct mwifiex_ie_types_htcap *ht_capab;
+ struct mwifiex_ie_types_qos_info *wmm_qos_info;
+ struct mwifiex_ie_types_extcap *extcap;
+ struct mwifiex_ie_types_vhtcap *vht_capab;
+ struct mwifiex_ie_types_aid *aid;
+ u8 *pos, qos_info;
+ u16 config_len = 0;
+ struct station_parameters *params = priv->sta_params;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+
+ tdls_oper->reason = 0;
+ memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
+ sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
+
+ pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
+
+ switch (oper->tdls_action) {
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
+ break;
+ case MWIFIEX_TDLS_CREATE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
+ break;
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
+
+ if (!params) {
+ dev_err(priv->adapter->dev,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
+ return -ENODATA;
+ }
+
+ *(__le16 *)pos = cpu_to_le16(params->capability);
+ config_len += sizeof(params->capability);
+
+ qos_info = params->uapsd_queues | (params->max_sp << 5);
+ wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
+ config_len);
+ wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
+ wmm_qos_info->qos_info = qos_info;
+ config_len += sizeof(struct mwifiex_ie_types_qos_info);
+
+ if (params->ht_capa) {
+ ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
+ config_len);
+ ht_capab->header.type =
+ cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy(&ht_capab->ht_cap, params->ht_capa,
+ sizeof(struct ieee80211_ht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
+ if (params->supported_rates && params->supported_rates_len) {
+ tlv_rates = (struct host_cmd_tlv_rates *)(pos +
+ config_len);
+ tlv_rates->header.type =
+ cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_rates->header.len =
+ cpu_to_le16(params->supported_rates_len);
+ memcpy(tlv_rates->rates, params->supported_rates,
+ params->supported_rates_len);
+ config_len += sizeof(struct host_cmd_tlv_rates) +
+ params->supported_rates_len;
+ }
+
+ if (params->ext_capab && params->ext_capab_len) {
+ extcap = (struct mwifiex_ie_types_extcap *)(pos +
+ config_len);
+ extcap->header.type =
+ cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ extcap->header.len = cpu_to_le16(params->ext_capab_len);
+ memcpy(extcap->ext_capab, params->ext_capab,
+ params->ext_capab_len);
+ config_len += sizeof(struct mwifiex_ie_types_extcap) +
+ params->ext_capab_len;
+ }
+ if (params->vht_capa) {
+ vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
+ config_len);
+ vht_capab->header.type =
+ cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ vht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy(&vht_capab->vht_cap, params->vht_capa,
+ sizeof(struct ieee80211_vht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_vhtcap);
+ }
+ if (params->aid) {
+ aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
+ aid->header.type = cpu_to_le16(WLAN_EID_AID);
+ aid->header.len = cpu_to_le16(sizeof(params->aid));
+ aid->aid = cpu_to_le16(params->aid);
+ config_len += sizeof(struct mwifiex_ie_types_aid);
+ }
+
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, config_len);
+
+ return 0;
+}
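
The MWIFIEX_TDLS_CONFIG_LINK branch above grows the command by appending TLV blocks (capability, QoS info, HT/VHT capabilities, rates, extended capabilities, AID) after the fixed host_cmd_ds_tdls_oper header, then adds the accumulated config_len to the little-endian command size. Below is a simplified standalone sketch of that append-and-grow pattern; the helpers and element id are illustrative assumptions, not the driver's real TLV structures.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Store a 16-bit value little-endian; returns bytes written. */
static size_t put_le16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v & 0xff);
	dst[1] = (uint8_t)(v >> 8);
	return 2;
}

/* Append one TLV (le16 type, le16 length, value); returns bytes written.
 * The caller is responsible for sizing the destination buffer. */
static size_t append_tlv(uint8_t *pos, uint16_t type, const void *val, uint16_t len)
{
	size_t off = 0;

	off += put_le16(pos + off, type);
	off += put_le16(pos + off, len);
	memcpy(pos + off, val, len);
	return off + len;
}

/* Build the variable part of a TDLS config: fixed capability field first,
 * then optional TLVs; the returned length is added to the command size. */
size_t build_tdls_config(uint8_t *buf, uint16_t capability,
			 const uint8_t *rates, uint16_t rates_len)
{
	size_t config_len = 0;

	config_len += put_le16(buf + config_len, capability);
	config_len += append_tlv(buf + config_len, 1 /* Supported Rates */,
				 rates, rates_len);
	return config_len;
}
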
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1472,6 +1845,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1883,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1547,15 +1926,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_PCIE_DESC_DETAILS,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_PCIE_DESC_DETAILS,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
if (ret)
return -1;
}
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_FUNC_INIT,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
if (ret)
return -1;
@@ -1573,55 +1953,57 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
}
if (adapter->cal_data) {
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+ HostCmd_ACT_GEN_SET, 0, NULL,
+ true);
if (ret)
return -1;
}
/* Read MAC address from HW */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
/* Reconfigure tx buf size */
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_RECONFIGURE_TX_BUFF,
- HostCmd_ACT_GEN_SET, 0,
- &priv->adapter->tx_buf_size);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->adapter->tx_buf_size, true);
if (ret)
return -1;
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable IEEE PS by default */
priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
- ret = mwifiex_send_cmd_sync(
- priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_STA_PS, NULL);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_STA_PS, NULL,
+ true);
if (ret)
return -1;
}
}
/* get tx rate */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
priv->data_rate = 0;
/* get tx power */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (ret)
return -1;
if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
/* set ibss coalescing_status */
- ret = mwifiex_send_cmd_sync(
- priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_SET, 0, &enable);
+ ret = mwifiex_send_cmd(
+ priv,
+ HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+ HostCmd_ACT_GEN_SET, 0, &enable, true);
if (ret)
return -1;
}
@@ -1629,16 +2011,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
- HostCmd_ACT_GEN_SET, 0,
- &amsdu_aggr_ctrl);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+ HostCmd_ACT_GEN_SET, 0,
+ &amsdu_aggr_ctrl, true);
if (ret)
return -1;
/* MAC Control must be the last command in init_fw */
/* set MAC Control */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true);
if (ret)
return -1;
@@ -1647,10 +2029,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_AUTO_DS,
- &auto_ds);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_AUTO_DS,
+ &auto_ds, true);
if (ret)
return -1;
}
@@ -1658,9 +2039,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Send cmd to FW to enable/disable 11D function */
state_11d = ENABLE_11D;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11D_I,
- &state_11d);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11D_I,
+ &state_11d, true);
if (ret)
dev_err(priv->adapter->dev,
"11D: failed to enable 11D\n");
@@ -1673,8 +2054,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
* (Short GI, Channel BW, Green field support etc.) for transmit
*/
tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
- HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
+ HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
ret = -EINPROGRESS;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 24523e4015cb..bfebb0144df5 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
+ case HostCmd_CMD_802_11_SCAN_EXT:
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
@@ -157,8 +158,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
priv->subsc_evt_rssi_state = EVENT_HANDLED;
- mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, subsc_evt);
+ mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+ 0, 0, subsc_evt, false);
return 0;
}
@@ -303,6 +304,15 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
priv->bitmap_rates[2 + i] =
le16_to_cpu(rate_scope->
ht_mcs_rate_bitmap[i]);
+
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0; i < ARRAY_SIZE(rate_scope->
+ vht_mcs_rate_bitmap);
+ i++)
+ priv->bitmap_rates[10 + i] =
+ le16_to_cpu(rate_scope->
+ vht_mcs_rate_bitmap[i]);
+ }
break;
/* Add RATE_DROP tlv here */
}
@@ -316,9 +326,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
if (priv->is_data_rate_auto)
priv->data_rate = 0;
else
- return mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
return 0;
}
@@ -561,13 +570,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of set/get key material.
+ * This function handles the command response of set/get v1 key material.
*
* Handling includes updating the driver parameters to reflect the
* changes.
*/
-static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp)
+static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material *key =
&resp->params.key_material;
@@ -590,6 +599,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of set/get v2 key material.
+ *
+ * Handling includes updating the driver parameters to reflect the
+ * changes.
+ */
+static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+ __le16 len;
+
+ key_v2 = &resp->params.key_material_v2;
+ if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+ dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ priv->wpa_is_gtk_set = true;
+ priv->scan_block = false;
+ }
+ }
+
+ if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
+ return 0;
+
+ memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+ WLAN_KEY_LEN_CCMP);
+ priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ key_v2->key_param_set.key_params.aes.key_len;
+ len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+ memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+ key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+
+ return 0;
+}
+
+/* Wrapper function for processing response of key material command */
+static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_ret_802_11_key_material_v2(priv, resp);
+ else
+ return mwifiex_ret_802_11_key_material_v1(priv, resp);
+}
+
+/*
* This function handles the command response of get 11d domain information.
*/
static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
@@ -800,7 +854,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
+ u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
+ u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
+ struct mwifiex_sta_node *node =
+ mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
+ switch (action) {
+ case ACT_TDLS_DELETE:
+ if (reason)
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_dbg(priv->adapter->dev,
+				"TDLS link delete for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ break;
+ case ACT_TDLS_CREATE:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node && reason != TDLS_ERR_LINK_EXISTS)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ case ACT_TDLS_CONFIG:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+			"Unknown TDLS command action response %d", action);
+ return -1;
+ }
+
+ return 0;
+}
/*
* This function handles the command response for subscribe event command.
*/
@@ -871,6 +978,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_802_11_scan(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_ret_802_11_scan_ext(priv);
+ adapter->curr_cmd->wait_q_enabled = false;
+ break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
dev_dbg(adapter->dev,
@@ -999,6 +1110,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_COALESCE_CFG:
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_ret_tdls_oper(priv, resp);
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8c351f71f72f..368450cc56c7 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ mwifiex_disable_all_tdls_links(priv);
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -112,7 +116,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
- if (adapter->num_cmd_timeout && adapter->curr_cmd)
+ if (adapter->is_cmd_timedout && adapter->curr_cmd)
return;
priv->media_connected = false;
dev_dbg(adapter->dev,
@@ -289,9 +293,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_HS_ACT_REQ:
dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- 0, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
+ 0, 0, NULL, false);
break;
case EVENT_MIC_ERR_UNICAST:
@@ -322,27 +325,34 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_BG_SCAN_REPORT:
dev_dbg(adapter->dev, "event: BGS_REPORT\n");
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_BG_SCAN_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_PORT_RELEASE:
dev_dbg(adapter->dev, "event: PORT RELEASE\n");
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ ret = mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+
+ break;
+
case EVENT_WMM_STATUS_CHANGE:
dev_dbg(adapter->dev, "event: WMM status changed\n");
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
- 0, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
+ 0, 0, NULL, false);
break;
case EVENT_RSSI_LOW:
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
GFP_KERNEL);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
break;
@@ -356,8 +366,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
cfg80211_cqm_rssi_notify(priv->netdev,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
GFP_KERNEL);
- mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
break;
@@ -384,15 +394,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_IBSS_COALESCED:
dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
- ret = mwifiex_send_cmd_async(priv,
+ ret = mwifiex_send_cmd(priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_ADDBA:
dev_dbg(adapter->dev, "event: ADDBA Request\n");
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
- HostCmd_ACT_GEN_SET, 0,
- adapter->event_body);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+ HostCmd_ACT_GEN_SET, 0,
+ adapter->event_body, false);
break;
case EVENT_DELBA:
dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -443,10 +453,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->csa_expire_time =
jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_DEAUTHENTICATE,
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
HostCmd_ACT_GEN_SET, 0,
- priv->curr_bss_params.bss_descriptor.mac_address);
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ false);
break;
default:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c5cb2ed19ec2..894270611f2c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,6 +64,7 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
*(cmd_queued->condition));
if (status) {
dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
@@ -108,19 +109,19 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
"info: Set multicast list=%d\n",
mcast_list->num_multicast_addr);
/* Send multicast addresses to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_MAC_MULTICAST_ADR,
- HostCmd_ACT_GEN_SET, 0,
- mcast_list);
+ ret = mwifiex_send_cmd(priv,
+ HostCmd_CMD_MAC_MULTICAST_ADR,
+ HostCmd_ACT_GEN_SET, 0,
+ mcast_list, false);
}
}
dev_dbg(priv->adapter->dev,
"info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
old_pkt_filter, priv->curr_pkt_filter);
if (old_pkt_filter != priv->curr_pkt_filter) {
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET,
- 0, &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET,
+ 0, &priv->curr_pkt_filter, false);
}
return ret;
@@ -237,8 +238,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
rcu_read_unlock();
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
- HostCmd_ACT_GEN_SET, 0, NULL)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+ HostCmd_ACT_GEN_SET, 0, NULL, false)) {
wiphy_err(priv->adapter->wiphy,
"11D: setting domain info in FW\n");
return -1;
@@ -290,7 +291,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
HostCmd_SCAN_RADIO_TYPE_BG)
- config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+ config_bands = BAND_B | BAND_G | BAND_GN;
else
config_bands = BAND_A | BAND_AN | BAND_AAC;
@@ -429,16 +430,13 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
status = -1;
break;
}
- if (cmd_type == MWIFIEX_SYNC_CMD)
- status = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- HostCmd_ACT_GEN_SET, 0,
- &adapter->hs_cfg);
- else
- status = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_HS_CFG_ENH,
- HostCmd_ACT_GEN_SET, 0,
- &adapter->hs_cfg);
+
+ status = mwifiex_send_cmd(priv,
+ HostCmd_CMD_802_11_HS_CFG_ENH,
+ HostCmd_ACT_GEN_SET, 0,
+ &adapter->hs_cfg,
+ cmd_type == MWIFIEX_SYNC_CMD);
+
if (hs_cfg->conditions == HS_CFG_CANCEL)
/* Restore previous condition */
adapter->hs_cfg.conditions =
@@ -511,6 +509,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
hscfg.is_invoke_hostcmd = true;
+ adapter->hs_enabling = true;
+ mwifiex_cancel_all_pending_cmd(adapter);
+
if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
@@ -519,8 +520,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
return false;
}
- if (wait_event_interruptible(adapter->hs_activate_wait_q,
- adapter->hs_activate_wait_q_woken)) {
+ if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
+ adapter->hs_activate_wait_q_woken,
+ (10 * HZ)) <= 0) {
dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
return false;
}
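
The hunk above replaces an unbounded wait_event_interruptible() with wait_event_interruptible_timeout() capped at ten seconds, so a missing HS_ACTIVATE event can no longer hang the caller; a non-positive return now means either a signal or a timeout and is treated as failure. As a rough userspace analogue of that bounded-wait idea (not the kernel API), using pthread_cond_timedwait():

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool hs_activated;   /* set by the "event" path while holding the lock */

/* Wait up to @secs seconds for hs_activated; returns false on timeout
 * (or any other wait error), mirroring the <= 0 check above. */
bool wait_hs_activate(int secs)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += secs;

	pthread_mutex_lock(&lock);
	while (!hs_activated && err == 0)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return hs_activated;
}
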
@@ -586,8 +588,8 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
auto_ds.auto_ds = DEEP_SLEEP_OFF;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true);
}
EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
@@ -601,8 +603,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
{
int ret;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
if (!ret) {
if (priv->is_data_rate_auto)
@@ -698,8 +700,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_40;
}
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG,
- HostCmd_ACT_GEN_SET, 0, buf);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_TXPWR_CFG,
+ HostCmd_ACT_GEN_SET, 0, buf, true);
kfree(buf);
return ret;
@@ -722,12 +724,11 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
- sub_cmd, BITMAP_STA_PS, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ sub_cmd, BITMAP_STA_PS, NULL, true);
if ((!ret) && (sub_cmd == DIS_AUTO_PS))
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- GET_PS, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ GET_PS, 0, NULL, false);
return ret;
}
@@ -851,9 +852,9 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
- encrypt_key);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
+ encrypt_key, true);
}
/*
@@ -865,6 +866,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct mwifiex_wep_key *wep_key;
int index;
@@ -879,10 +881,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"key not set, so cannot enable it\n");
return -1;
}
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
+ memcpy(encrypt_key->key_material,
+ wep_key->key_material, wep_key->key_length);
+ encrypt_key->key_len = wep_key->key_length;
+ }
+
priv->wep_key_curr_index = (u16) index;
priv->sec_info.wep_enabled = 1;
} else {
@@ -897,21 +906,32 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
+ void *enc_key;
+
+ if (encrypt_key->key_disable)
+ memset(&priv->wep_key[index], 0,
+ sizeof(struct mwifiex_wep_key));
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ enc_key = encrypt_key;
+ else
+ enc_key = NULL;
+
/* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, 0, enc_key, false);
if (ret)
return ret;
}
+
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter, true);
return ret;
}
@@ -946,10 +966,9 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
*/
/* Send the key as PTK to firmware */
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key, false);
if (ret)
return ret;
@@ -973,15 +992,13 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
if (remove_key)
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- !KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ !KEY_INFO_ENABLED, encrypt_key, true);
else
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET,
- KEY_INFO_ENABLED, encrypt_key);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET,
+ KEY_INFO_ENABLED, encrypt_key, true);
return ret;
}
@@ -1044,19 +1061,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
+ encrypt_key.key_index = key_index;
if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
encrypt_key.is_igtk_key = true;
if (!disable) {
- encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ else
+ encrypt_key.is_current_wep_key = true;
+
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
- if (kp && kp->seq && kp->seq_len)
+ if (kp && kp->seq && kp->seq_len) {
memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
+ encrypt_key.pn_len = kp->seq_len;
+ encrypt_key.is_rx_seq_valid = true;
+ }
} else {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1077,8 +1102,8 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
struct mwifiex_ver_ext ver_ext;
memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
- HostCmd_ACT_GEN_GET, 0, &ver_ext))
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
+ HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
return -1;
return 0;
@@ -1103,8 +1128,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
ieee80211_frequency_to_channel(chan->center_freq);
roc_cfg.duration = cpu_to_le32(duration);
}
- if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
- action, 0, &roc_cfg)) {
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+ action, 0, &roc_cfg, true)) {
dev_err(priv->adapter->dev, "failed to remain on channel\n");
return -1;
}
@@ -1136,8 +1161,8 @@ mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
break;
}
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
return mwifiex_sta_init_cmd(priv, false);
}
@@ -1152,8 +1177,8 @@ int
mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log)
{
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
- HostCmd_ACT_GEN_GET, 0, log);
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_GET_LOG,
+ HostCmd_ACT_GEN_GET, 0, log, true);
}
/*
@@ -1195,8 +1220,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
return -1;
}
- return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw);
-
+ return mwifiex_send_cmd(priv, cmd_no, action, 0, reg_rw, true);
}
/*
@@ -1261,8 +1285,8 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
/* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
- HostCmd_ACT_GEN_GET, 0, &rd_eeprom);
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
+ HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true);
if (!ret)
memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
@@ -1391,7 +1415,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
* with requisite parameters and calls the IOCTL handler.
*/
int
-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
+mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 4651d676df38..ed26387eccf5 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rxpd *local_rx_pd;
int hdr_chop;
struct ethhdr *eth;
+ u16 rx_pkt_off, rx_pkt_len;
+ u8 *offset;
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_hdr = (void *)local_rx_pd +
- le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return 0;
}
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
+ offset = (u8 *)local_rx_pd + rx_pkt_off;
+ mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
@@ -192,26 +201,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
return ret;
}
- if (rx_pkt_type == PKT_TYPE_AMSDU) {
- struct sk_buff_head list;
- struct sk_buff *rx_skb;
-
- __skb_queue_head_init(&list);
-
- skb_pull(skb, rx_pkt_offset);
- skb_trim(skb, rx_pkt_length);
-
- ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
-
- while (!skb_queue_empty(&list)) {
- rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(priv, rx_skb);
- if (ret == -1)
- dev_err(adapter->dev, "Rx of A-MSDU failed");
- }
- return 0;
- } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 354d64c9606f..1236a5de7bca 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
}
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
/* Offset of actual data */
pkt_offset = sizeof(struct txpd) + pad;
if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644
index 000000000000..97662a1ba58c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -0,0 +1,1044 @@
+/* Marvell Wireless LAN device driver: TDLS handling
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "11n_rxreorder.h"
+#include "11ac.h"
+
+#define TDLS_REQ_FIX_LEN 6
+#define TDLS_RESP_FIX_LEN 8
+#define TDLS_CONFIRM_FIX_LEN 6
+
+static void
+mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *tid_list;
+ struct sk_buff *skb, *tmp;
+ struct mwifiex_txinfo *tx_info;
+ unsigned long flags;
+ u32 tid;
+ u8 tid_down;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+
+ __skb_unlink(skb, &priv->tdls_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tid = skb->priority;
+ tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+
+ if (status == TDLS_SETUP_COMPLETE) {
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
+ ra_list->tdls_link = true;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ } else {
+ tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(tid_list))
+ ra_list = list_first_entry(tid_list,
+ struct mwifiex_ra_list_tbl, list);
+ else
+ ra_list = NULL;
+ tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
+ }
+
+ if (!ra_list) {
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ continue;
+ }
+
+ skb_queue_tail(&ra_list->skb_head, skb);
+
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
+
+ if (atomic_read(&priv->wmm.highest_queued_prio) <
+ tos_to_tid_inv[tid_down])
+ atomic_set(&priv->wmm.highest_queued_prio,
+ tos_to_tid_inv[tid_down]);
+
+ atomic_inc(&priv->wmm.tx_pkts_queued);
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *ra_list_head;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
+ ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ skb_queue_walk_safe(&ra_list->skb_head, skb,
+ tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+ __skb_unlink(skb, &ra_list->skb_head);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ ra_list->total_pkt_count--;
+ skb_queue_tail(&priv->tdls_txq, skb);
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+/* This function appends supported rates IEs to a TDLS frame. */
+static int
+mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
+ u16 rates_size, supp_rates_size, ext_rates_size;
+
+ memset(rates, 0, sizeof(rates));
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
+
+ if (skb_tailroom(skb) < rates_size + 4) {
+ dev_err(priv->adapter->dev,
+			"Insufficient space while adding rates\n");
+ return -ENOMEM;
+ }
+
+ pos = skb_put(skb, supp_rates_size + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_size;
+ memcpy(pos, rates, supp_rates_size);
+
+ if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
+ ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
+ pos = skb_put(skb, ext_rates_size + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = ext_rates_size;
+ memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
+ ext_rates_size);
+ }
+
+ return 0;
+}
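
mwifiex_tdls_append_rates_ie() above splits the rate set across two information elements: the first MWIFIEX_TDLS_SUPPORTED_RATES entries go into a Supported Rates IE and any remainder into an Extended Supported Rates IE. A standalone sketch of that split follows, assuming an 8-entry limit for the first IE (the driver's actual constant may differ).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EID_SUPP_RATES      1
#define EID_EXT_SUPP_RATES  50
#define FIRST_IE_MAX        8   /* assumed value of MWIFIEX_TDLS_SUPPORTED_RATES */

/* Append the rate list as one or two IEs; returns bytes written.
 * The caller must provide a buffer of at least rates_len + 4 bytes. */
size_t append_rate_ies(uint8_t *pos, const uint8_t *rates, size_t rates_len)
{
	size_t first = rates_len < FIRST_IE_MAX ? rates_len : FIRST_IE_MAX;
	size_t off = 0;

	pos[off++] = EID_SUPP_RATES;
	pos[off++] = (uint8_t)first;
	memcpy(pos + off, rates, first);
	off += first;

	if (rates_len > first) {
		pos[off++] = EID_EXT_SUPP_RATES;
		pos[off++] = (uint8_t)(rates_len - first);
		memcpy(pos + off, rates + first, rates_len - first);
		off += rates_len - first;
	}
	return off;
}
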
+
+static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_assoc_rsp *assoc_rsp;
+ u8 *pos;
+
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
+ pos = (void *)skb_put(skb, 4);
+ *pos++ = WLAN_EID_AID;
+ *pos++ = 2;
+ *pos++ = le16_to_cpu(assoc_rsp->a_id);
+
+ return;
+}
+
+static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_vht_cap vht_cap;
+ u8 *pos;
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_cap);
+
+ memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
+
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
+ memcpy(pos, &vht_cap, sizeof(vht_cap));
+
+ return 0;
+}
+
+static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
+ u8 *mac, struct sk_buff *skb)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+ struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 supp_chwd_set, peer_supp_chwd_set;
+ u8 *pos, ap_supp_chwd_set, chan_bw;
+ u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+ u16 mcs_user, mcs_resp, nss;
+ u32 usr_vht_cap_info;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ if (!mwifiex_is_bss_in_11ac_mode(priv)) {
+ if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ dev_dbg(adapter->dev,
+				"TDLS peer doesn't support wider bandwidth\n");
+ return 0;
+ }
+ } else {
+ ap_vht_cap = bss_desc->bcn_vht_cap;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+ *pos++ = WLAN_EID_VHT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_vht_operation);
+ vht_oper = (struct ieee80211_vht_operation *)pos;
+
+ if (bss_desc->bss_band & BAND_A)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+	/* find the minimum bandwidth between AP/TDLS peers */
+ vht_cap = &sta_ptr->tdls_cap.vhtcap;
+ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+ peer_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
+
+	/* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
+
+ if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ ap_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
+ }
+
+ switch (supp_chwd_set) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ default:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
+ mcs_map_result = 0;
+
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min_t(u16, mcs_user, mcs_resp));
+ }
+
+ vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
+
+ switch (vht_oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+ vht_oper->center_freq_seg1_idx =
+ mwifiex_get_center_freq_index(priv, BAND_AAC,
+ bss_desc->channel,
+ chan_bw);
+
+ return 0;
+}
+
+static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_extcap *extcap;
+
+ extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+ extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
+ extcap->ieee_hdr.len = 8;
+ memset(extcap->ext_capab, 0, 8);
+ extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
+}
+
+static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_QOS_CAPA;
+ *pos++ = 1;
+ *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
+}
+
+static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf;
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+ break;
+
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
+ sizeof(struct ieee_types_header);
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ int ret;
+ u16 skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ 3 + /* Qos Info */
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+			"failed to allocate skb for management frame\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
+ priv->cfg_bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+ break;
+ }
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb->priority = MWIFIEX_PRIO_BK;
+ break;
+ default:
+ skb->priority = MWIFIEX_PRIO_VI;
+ break;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+static int
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ /* add address 4 */
+ pos = skb_put(skb, ETH_ALEN);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(capab);
+ /* move back for addr4 */
+ memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+ sizeof(mgmt->u.action.u.tdls_discover_resp));
+ /* init address 4 */
+ memcpy(pos, bc_addr, ETH_ALEN);
+
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ u8 *pos;
+ u32 pkt_type, tx_control;
+ u16 pkt_len, skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len +
+ 3 + /* Qos Info */
+ ETH_ALEN; /* Address4 */
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+			"failed to allocate skb for management frame\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ pkt_type = PKT_TYPE_MGMT;
+ tx_control = 0;
+ pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(pos, &pkt_type, sizeof(pkt_type));
+ memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
+
+ if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ skb)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+	/* the TDLS link IE is always added last since we are the responder */
+
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+
+ skb->priority = MWIFIEX_PRIO_VI;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+
+ pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
+ memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
+ sizeof(pkt_len));
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
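
mwifiex_send_tdls_action_frame() above reserves a small transport header (packet type, tx control and a length field) at the front of the skb, builds the 802.11 action frame behind it, and only then back-patches the length once the final size is known. The sketch below shows that reserve/fill/back-patch pattern in a standalone form; the header layout and constants are assumptions, not the driver's exact format.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HDR_SIZE 8   /* assumed: 4-byte pkt_type + 4-byte tx_control */

/* Build: [pkt_type][tx_control][pkt_len][frame body]; returns total size. */
size_t build_mgmt_buffer(uint8_t *buf, const uint8_t *body, uint16_t body_len)
{
	uint32_t pkt_type = 1;       /* stand-in for the real PKT_TYPE_MGMT value */
	uint32_t tx_control = 0;
	uint16_t pkt_len;
	size_t off = 0;

	memcpy(buf + off, &pkt_type, sizeof(pkt_type));
	off += sizeof(pkt_type);
	memcpy(buf + off, &tx_control, sizeof(tx_control));
	off += sizeof(tx_control);
	off += sizeof(pkt_len);              /* leave a hole for the length */

	memcpy(buf + off, body, body_len);   /* frame body goes after the header */
	off += body_len;

	/* back-patch the length, as the hunk above does with skb->data */
	pkt_len = (uint16_t)(off - HDR_SIZE - sizeof(pkt_len));
	memcpy(buf + HDR_SIZE, &pkt_len, sizeof(pkt_len));

	return off;
}
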
+
+/* This function processes a TDLS action frame from a peer.
+ * Peer capabilities are stored in the station node structure.
+ */
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *peer, *pos, *end;
+ u8 i, action, basic;
+ int ie_len = 0;
+
+ if (len < (sizeof(struct ethhdr) + 3))
+ return;
+ if (*(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
+ return;
+ if (*(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
+ return;
+
+ peer = buf + ETH_ALEN;
+ action = *(buf + sizeof(struct ethhdr) + 2);
+
+ /* just handle TDLS setup request/response/confirm */
+ if (action > WLAN_TDLS_SETUP_CONFIRM)
+ return;
+
+ dev_dbg(priv->adapter->dev,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return;
+
+ switch (action) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
+ return;
+
+ pos = buf + sizeof(struct ethhdr) + 4;
+		/* payload 1 + category 1 + action 1 + dialog 1 */
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
+ return;
+		/* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
+ pos = buf + sizeof(struct ethhdr) + 6;
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+ return;
+ pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+ ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ break;
+ default:
+ dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return;
+ }
+
+ for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+ if (pos + 2 + pos[1] > end)
+ break;
+
+ switch (*pos) {
+ case WLAN_EID_SUPP_RATES:
+ sta_ptr->tdls_cap.rates_len = pos[1];
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ break;
+
+ case WLAN_EID_EXT_SUPP_RATES:
+ basic = sta_ptr->tdls_cap.rates_len;
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+ sta_ptr->tdls_cap.rates_len += pos[1];
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ sizeof(struct ieee80211_ht_cap));
+ sta_ptr->is_11n_enabled = 1;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ sizeof(struct ieee80211_ht_operation));
+ break;
+ case WLAN_EID_BSS_COEX_2040:
+ sta_ptr->tdls_cap.coex_2040 = pos[2];
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], 8));
+ break;
+ case WLAN_EID_RSN:
+ memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ sizeof(struct ieee_types_header) + pos[1]);
+ break;
+ case WLAN_EID_QOS_CAPA:
+ sta_ptr->tdls_cap.qos_info = pos[2];
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (priv->adapter->is_hw_11ac_capable)
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ sizeof(struct ieee80211_vht_operation));
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (priv->adapter->is_hw_11ac_capable) {
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ sizeof(struct ieee80211_vht_cap));
+ sta_ptr->is_11ac_enabled = 1;
+ }
+ break;
+ case WLAN_EID_AID:
+ if (priv->adapter->is_hw_11ac_capable)
+ sta_ptr->tdls_cap.aid =
+ le16_to_cpu(*(__le16 *)(pos + 2));
+ default:
+ break;
+ }
+ }
+
+ return;
+}
+
+static int
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
+ dev_err(priv->adapter->dev,
+ "link absent for peer %pM; cannot config\n", peer);
+ return -EINVAL;
+ }
+
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
+ dev_dbg(priv->adapter->dev,
+ "Setup already in progress for peer %pM\n", peer);
+ return 0;
+ }
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return -ENOMEM;
+
+ sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
+ mwifiex_hold_tdls_packets(priv, peer);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr) {
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+ mwifiex_del_sta_entry(priv, peer);
+ }
+
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct ieee80211_mcs_info mcs;
+ unsigned long flags;
+ int i;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM success\n", peer);
+
+ sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
+
+ mcs = sta_ptr->tdls_cap.ht_capb.mcs;
+ if (mcs.rx_mask[0] != 0xff)
+ sta_ptr->is_11n_enabled = true;
+ if (sta_ptr->is_11n_enabled) {
+ if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU)
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ else
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ } else {
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM failed\n", peer);
+ if (sta_ptr) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_del_sta_entry(priv, peer);
+ }
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+{
+ switch (action) {
+ case MWIFIEX_TDLS_ENABLE_LINK:
+ return mwifiex_tdls_process_enable_link(priv, peer);
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ return mwifiex_tdls_process_disable_link(priv, peer);
+ case MWIFIEX_TDLS_CREATE_LINK:
+ return mwifiex_tdls_process_create_link(priv, peer);
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ return mwifiex_tdls_process_config_link(priv, peer);
+ }
+ return 0;
+}
+
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (sta_ptr)
+ return sta_ptr->tdls_status;
+
+ return TDLS_NOT_SETUP;
+}
+
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ if (list_empty(&priv->sta_list))
+ return;
+
+ list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+
+ mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
+ TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
+ dev_warn(priv->adapter->dev,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
+ }
+
+ mwifiex_del_all_sta_list(priv);
+}
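The element loop in mwifiex_process_tdls_action_frame() above follows the standard 802.11 information-element layout: a one-byte element ID, a one-byte length, then the payload. A minimal sketch of that walk in isolation; the function and callback names are illustrative, not part of the driver:

static void walk_ies(const u8 *pos, int ie_len,
		     void (*handle)(u8 id, u8 len, const u8 *data))
{
	const u8 *end = pos + ie_len;

	while (pos + 2 <= end) {
		if (pos + 2 + pos[1] > end)
			break;				/* truncated element */
		handle(pos[0], pos[1], pos + 2);	/* id, len, payload */
		pos += 2 + pos[1];
	}
}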
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 64424c81b44f..9be6544bdded 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -159,6 +159,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params)
{
const u8 *ht_ie;
+ u16 cap_info;
if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
return;
@@ -168,6 +169,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
+ cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
+ memset(&bss_cfg->ht_cap.mcs, 0,
+ priv->adapter->number_of_antenna);
+ switch (GET_RXSTBC(cap_info)) {
+ case MWIFIEX_RX_STBC1:
+ /* HT_CAP 1X1 mode */
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+ break;
+ case MWIFIEX_RX_STBC12: /* fall through */
+ case MWIFIEX_RX_STBC123:
+ /* HT_CAP 2X2 mode */
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unsupported RX-STBC, default to 2x2\n");
+ memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ break;
+ }
priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
@@ -226,8 +246,8 @@ void mwifiex_set_vht_width(struct mwifiex_private *priv,
if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
vht_cfg.misc_config |= VHT_BW_80_160_80P80;
- mwifiex_send_cmd_sync(priv, HostCmd_CMD_11AC_CFG,
- HostCmd_ACT_GEN_SET, 0, &vht_cfg);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
+ HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
return;
}
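The RX-STBC switch added above boils down to choosing how many bytes of the HT MCS rx_mask to set: each 0xff byte advertises eight MCS rates, i.e. one spatial stream. A minimal sketch of that mapping, assuming the stream count has already been derived from GET_RXSTBC(); the helper name is illustrative:

static void set_rx_mcs_for_streams(struct ieee80211_mcs_info *mcs,
				   unsigned int streams)
{
	memset(mcs, 0, sizeof(*mcs));
	/* MCS 0-7 per stream: one 0xff byte of rx_mask per spatial stream */
	memset(mcs->rx_mask, 0xff,
	       min_t(unsigned int, streams, IEEE80211_HT_MCS_MASK_LEN));
}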
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 718066577c6c..92e77a398ecf 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,126 +21,8 @@
#include "main.h"
#include "11n.h"
-/*
- * This function will return the pointer to station entry in station list
- * table which matches specified mac address.
- * This function should be called after acquiring RA list spinlock.
- * NULL is returned if station entry is not found in associated STA list.
- */
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
-
- if (!mac)
- return NULL;
-
- list_for_each_entry(node, &priv->sta_list, list) {
- if (!memcmp(node->mac_addr, mac, ETH_ALEN))
- return node;
- }
-
- return NULL;
-}
-
-/*
- * This function will add a sta_node entry to associated station list
- * table with the given mac address.
- * If entry exist already, existing entry is returned.
- * If received mac address is NULL, NULL is returned.
- */
-static struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- if (!mac)
- return NULL;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, mac);
- if (node)
- goto done;
-
- node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
- if (!node)
- goto done;
-
- memcpy(node->mac_addr, mac, ETH_ALEN);
- list_add_tail(&node->list, &priv->sta_list);
-
-done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return node;
-}
-
-/*
- * This function will search for HT IE in association request IEs
- * and set station HT parameters accordingly.
- */
-static void
-mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
- int ies_len, struct mwifiex_sta_node *node)
-{
- const struct ieee80211_ht_cap *ht_cap;
-
- if (!ies)
- return;
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
- node->is_11n_enabled = 1;
- node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU ?
- MWIFIEX_TX_DATA_BUF_SIZE_8K :
- MWIFIEX_TX_DATA_BUF_SIZE_4K;
- } else {
- node->is_11n_enabled = 0;
- }
- return;
-}
-
-/*
- * This function will delete a station entry from station list
- */
-static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- node = mwifiex_get_sta_entry(priv, mac);
- if (node) {
- list_del(&node->list);
- kfree(node);
- }
-
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
-
-/*
- * This function will delete all stations from associated station list.
- */
-static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
-{
- struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
- INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
/*
* This function handles AP interface specific events generated by firmware.
@@ -268,9 +150,9 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
case EVENT_ADDBA:
dev_dbg(adapter->dev, "event: ADDBA Request\n");
if (priv->media_connected)
- mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
- HostCmd_ACT_GEN_SET, 0,
- adapter->event_body);
+ mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+ HostCmd_ACT_GEN_SET, 0,
+ adapter->event_body, false);
break;
case EVENT_DELBA:
dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -284,6 +166,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
}
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ return mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 3c74eb254927..9a56bc61cb1d 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -284,27 +284,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
return 0;
}
- if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
- struct sk_buff_head list;
- struct sk_buff *rx_skb;
-
- __skb_queue_head_init(&list);
- skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
- skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
-
- ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev->iftype, 0, false);
-
- while (!skb_queue_empty(&list)) {
- rx_skb = __skb_dequeue(&list);
- ret = mwifiex_recv_packet(priv, rx_skb);
- if (ret)
- dev_err(adapter->dev,
- "AP:Rx A-MSDU failed");
- }
-
- return 0;
- } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
dev_err(adapter->dev, "Rx of mgmt packet failed");
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 208748804a55..edbe4aff00d8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -459,6 +459,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
* 'suspended' state and a 'disconnect' one.
*/
adapter->is_suspended = true;
+ adapter->hs_enabling = false;
if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
usb_kill_urb(card->rx_cmd.urb);
@@ -766,11 +767,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8897_PID_1:
case USB8897_PID_2:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
break;
case USB8797_PID_1:
case USB8797_PID_2:
default:
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
break;
}
@@ -1024,7 +1027,6 @@ static void mwifiex_usb_cleanup_module(void)
if (usb_card && usb_card->adapter) {
struct mwifiex_adapter *adapter = usb_card->adapter;
- int i;
/* In case driver is removed when asynchronous FW downloading is
* in progress
@@ -1035,11 +1037,8 @@ static void mwifiex_usb_cleanup_module(void)
if (adapter->is_suspended)
mwifiex_usb_resume(usb_card->intf);
#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_deauthenticate_all(adapter);
mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 9b82e225880c..c3824e37f3f2 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -72,7 +72,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
return -1;
}
- return mwifiex_send_cmd_sync(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL);
+ return mwifiex_send_cmd(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL, true);
}
EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw);
@@ -104,6 +104,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
info->is_hs_configured = adapter->is_hs_configured;
info->hs_activated = adapter->hs_activated;
+ info->is_cmd_timedout = adapter->is_cmd_timedout;
info->num_cmd_host_to_card_failure
= adapter->dbg.num_cmd_host_to_card_failure;
info->num_cmd_sleep_cfm_host_to_card_failure
@@ -119,7 +120,6 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
info->num_cmd_assoc_failure =
adapter->dbg.num_cmd_assoc_failure;
info->num_tx_timeout = adapter->dbg.num_tx_timeout;
- info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
return 0;
}
+
+/* This function will return the pointer to the station entry in the
+ * station list that matches the specified MAC address.
+ * This function should be called after acquiring the RA list spinlock.
+ * NULL is returned if the entry is not found in the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+
+ if (!mac)
+ return NULL;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+ return node;
+ }
+
+ return NULL;
+}
+
+/* This function will add a sta_node entry to the associated station list
+ * with the given MAC address.
+ * If an entry already exists, the existing entry is returned.
+ * If the received MAC address is NULL, NULL is returned.
+ */
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ if (!mac)
+ return NULL;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ goto done;
+
+ memcpy(node->mac_addr, mac, ETH_ALEN);
+ list_add_tail(&node->list, &priv->sta_list);
+
+done:
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return node;
+}
+
+/* This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ */
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node)
+{
+ const struct ieee80211_ht_cap *ht_cap;
+
+ if (!ies)
+ return;
+
+ ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+ if (ht_cap) {
+ node->is_11n_enabled = 1;
+ node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ } else {
+ node->is_11n_enabled = 0;
+ }
+
+ return;
+}
+
+/* This function will delete a station entry from station list */
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/* This function will delete all stations from associated station list. */
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&priv->sta_list);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index cb2d0582bd36..ddae57021397 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
}
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
+struct mwifiex_dma_mapping {
+ dma_addr_t addr;
+ size_t len;
+};
+
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
{
- memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
+ memcpy(mapping, skb->cb, sizeof(*mapping));
}
+
+static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
+{
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+
+ return mapping.addr;
+}
+
#endif /* !_MWIFIEX_UTIL_H_ */
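MWIFIEX_SKB_PACB() above relies on skb->cb being per-owner scratch space: the bus driver stores a struct mwifiex_dma_mapping there when it maps the buffer, and the completion path reads it back through MWIFIEX_SKB_DMA_ADDR(). A minimal sketch of the matching store side; the setter name here is hypothetical, the real one is not part of this header:

static inline void MWIFIEX_SKB_SET_PACB(struct sk_buff *skb,
					const struct mwifiex_dma_mapping *mapping)
{
	/* skb->cb is 48 bytes, comfortably larger than the mapping struct */
	memcpy(skb->cb, mapping, sizeof(*mapping));
}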
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 981cf6e7c73b..0a7cc742aed7 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -37,8 +37,8 @@
/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5
-static bool enable_tx_amsdu;
-module_param(enable_tx_amsdu, bool, 0644);
+static bool disable_tx_amsdu;
+module_param(disable_tx_amsdu, bool, 0644);
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
0x07 /* 1 1 1 AC_VO */
};
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
-
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
/*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
break;
ra_list->is_11n_enabled = 0;
+ ra_list->tdls_link = false;
if (!mwifiex_queuing_ra_based(priv)) {
- ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ if (mwifiex_get_tdls_link_status(priv, ra) ==
+ TDLS_SETUP_COMPLETE) {
+ ra_list->is_11n_enabled =
+ mwifiex_tdls_peer_11n_enabled(priv, ra);
+ } else {
+ ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ }
} else {
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
* This function map ACs to TIDs.
*/
static void
-mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
+mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
+ struct mwifiex_wmm_desc *wmm = &priv->wmm;
u8 *queue_priority = wmm->queue_priority;
int i;
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
}
for (i = 0; i < MAX_NUM_TID; ++i)
- tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
+ priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
}
}
- mwifiex_wmm_queue_priorities_tid(&priv->wmm);
+ mwifiex_wmm_queue_priorities_tid(priv);
}
/*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
* AP is disabled (due to call admission control (ACM bit). Mapping
* of TID to AC is taken care of internally.
*/
-static u8
-mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
enum mwifiex_wmm_ac_e ac, ac_down;
u8 new_tid;
@@ -421,9 +413,17 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
continue;
for (i = 0; i < MAX_NUM_TID; ++i) {
- priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
+ if (!disable_tx_amsdu &&
+ adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
+ priv->aggr_prio_tbl[i].amsdu =
+ priv->tos_to_tid_inv[i];
+ else
+ priv->aggr_prio_tbl[i].amsdu =
+ BA_STREAM_NOT_ALLOWED;
+ priv->aggr_prio_tbl[i].ampdu_ap =
+ priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_user =
+ priv->tos_to_tid_inv[i];
}
priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +546,7 @@ void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
unsigned long flags;
+ struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -563,6 +564,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
!priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
/*
@@ -591,7 +595,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* If no such node is found, a new node is added first and then
* retrieved.
*/
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -641,6 +645,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
unsigned long flags;
+ struct list_head list_head;
+ int tdls_status = TDLS_NOT_SETUP;
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
+ if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
+ dev_dbg(adapter->dev,
+ "TDLS setup packet for %pM. Don't block\n", ra);
+ else
+ tdls_status = mwifiex_get_tdls_link_status(priv, ra);
+ }
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -659,12 +678,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
have only 1 raptr for a tid in case of infra */
if (!mwifiex_queuing_ra_based(priv) &&
!mwifiex_is_skb_mgmt_frame(skb)) {
- if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
- ra_list = list_first_entry(
- &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
- struct mwifiex_ra_list_tbl, list);
- else
- ra_list = NULL;
+ switch (tdls_status) {
+ case TDLS_SETUP_COMPLETE:
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
+ ra);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ break;
+ case TDLS_SETUP_INPROGRESS:
+ skb_queue_tail(&priv->tdls_txq, skb);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ return;
+ default:
+ list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(&list_head))
+ ra_list = list_first_entry(
+ &list_head, struct mwifiex_ra_list_tbl,
+ list);
+ else
+ ra_list = NULL;
+ break;
+ }
} else {
memcpy(ra, skb->data, ETH_ALEN);
if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -684,9 +718,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
- tos_to_tid_inv[tid_down])
+ priv->tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
- tos_to_tid_inv[tid_down]);
+ priv->tos_to_tid_inv[tid_down]);
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -1219,15 +1253,24 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
- priv->wps.session_enable ||
- ((priv->sec_info.wpa_enabled ||
- priv->sec_info.wpa2_enabled) &&
- !priv->wpa_is_gtk_set)) {
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
- /* ra_list_spinlock has been freed in
- mwifiex_send_single_packet() */
+ priv->wps.session_enable) {
+ if (ptr->is_11n_enabled &&
+ mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
+ mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
+ mwifiex_is_amsdu_allowed(priv, tid) &&
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
+ adapter->tx_buf_size))
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ * mwifiex_11n_aggregate_pkt()
+ */
+ else
+ mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ * mwifiex_send_single_packet()
+ */
} else {
- if (mwifiex_is_ampdu_allowed(priv, tid) &&
+ if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
@@ -1240,7 +1283,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_delba(priv, tid_del, ra, 1);
}
}
- if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
+ if (mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
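The TDLS branch added to mwifiex_wmm_add_buf_txqueue() above parks data for a peer on priv->tdls_txq while setup is in progress; once the link comes up (or is torn down) mwifiex_restore_tdls_packets() releases it. A rough sketch of the release step under the simplifying assumption that held packets can simply be resubmitted to the WMM queues; the real helper also covers the teardown case, where traffic has to go back through the AP path:

static void requeue_held_tdls_skbs(struct mwifiex_private *priv)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&priv->tdls_txq)) != NULL)
		mwifiex_wmm_add_buf_txqueue(priv, skb);
}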
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 0f129d498fb1..83e42083ebff 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -34,6 +34,21 @@ enum ieee_types_wmm_ecw_bitmasks {
static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
/*
+ * This table inverses the tos_to_tid operation to get a priority
+ * which is in sequential order, and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+static const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07};
+
+/*
* This function retrieves the TID of the given RA list.
*/
static inline int
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
+struct mwifiex_ra_list_tbl *
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
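The tos_to_tid_inv table above, and the per-interface copy rebuilt in mwifiex_wmm_queue_priorities_tid(), is simply the inverse permutation of tos_to_tid: inv[tid] yields a sequential priority that can be compared directly. A minimal sketch of the rebuild step:

static void build_tid_inverse(const u8 *tos_to_tid, u8 *inv, int n)
{
	int i;

	for (i = 0; i < n; i++)
		inv[tos_to_tid[i]] = (u8)i;
}

With the first three entries of tos_to_tid being {1, 2, 0}, the loop yields inv[1] = 0, inv[2] = 1 and inv[0] = 2, which matches the first three entries of the static table above.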
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 4987c3f942ce..3c0a0a86ba12 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -81,6 +81,9 @@ MODULE_PARM_DESC(ap_mode_default,
*/
#define MWL8K_HW_TIMER_REGISTER 0x0000a600
+#define BBU_RXRDY_CNT_REG 0x0000a860
+#define NOK_CCA_CNT_REG 0x0000a6a0
+#define BBU_AVG_NOISE_VAL 0x67
#define MWL8K_A2H_EVENTS (MWL8K_A2H_INT_DUMMY | \
MWL8K_A2H_INT_CHNL_SWITCHED | \
@@ -112,6 +115,8 @@ MODULE_PARM_DESC(ap_mode_default,
*/
#define MWL8K_NUM_AMPDU_STREAMS (TOTAL_HW_TX_QUEUES - 1)
+#define MWL8K_NUM_CHANS 18
+
struct rxd_ops {
int rxd_size;
void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
@@ -289,6 +294,12 @@ struct mwl8k_priv {
/* bitmap of running BSSes */
u32 running_bsses;
+
+ /* ACS related */
+ bool sw_scan_start;
+ struct ieee80211_channel *acs_chan;
+ unsigned long channel_time;
+ struct survey_info survey[MWL8K_NUM_CHANS];
};
#define MAX_WEP_KEY_LEN 13
@@ -396,6 +407,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_HW_SPEC 0x0004
#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
#define MWL8K_CMD_GET_STAT 0x0014
+#define MWL8K_CMD_BBP_REG_ACCESS 0x001a
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_TX_POWER 0x001f
@@ -2987,6 +2999,47 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
}
/*
+ * CMD_BBP_REG_ACCESS.
+ */
+struct mwl8k_cmd_bbp_reg_access {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 offset;
+ u8 value;
+ u8 rsrv[3];
+} __packed;
+
+static int
+mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
+ u16 action,
+ u16 offset,
+ u8 *value)
+{
+ struct mwl8k_cmd_bbp_reg_access *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BBP_REG_ACCESS);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(action);
+ cmd->offset = cpu_to_le16(offset);
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+
+ if (!rc)
+ *value = cmd->value;
+ else
+ *value = 0;
+
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_POST_SCAN.
*/
struct mwl8k_cmd_set_post_scan {
@@ -3016,6 +3069,64 @@ mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
return rc;
}
+static int freq_to_idx(struct mwl8k_priv *priv, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ sband = priv->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static void mwl8k_update_survey(struct mwl8k_priv *priv,
+ struct ieee80211_channel *channel)
+{
+ u32 cca_cnt, rx_rdy;
+ s8 nf = 0, idx;
+ struct survey_info *survey;
+
+ idx = freq_to_idx(priv, priv->acs_chan->center_freq);
+ if (idx >= MWL8K_NUM_CHANS) {
+ wiphy_err(priv->hw->wiphy, "Failed to update survey\n");
+ return;
+ }
+
+ survey = &priv->survey[idx];
+
+ cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
+ cca_cnt /= 1000; /* uSecs to mSecs */
+ survey->channel_time_busy = (u64) cca_cnt;
+
+ rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ rx_rdy /= 1000; /* uSecs to mSecs */
+ survey->channel_time_rx = (u64) rx_rdy;
+
+ priv->channel_time = jiffies - priv->channel_time;
+ survey->channel_time = jiffies_to_msecs(priv->channel_time);
+
+ survey->channel = channel;
+
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &nf);
+
+ /* Make sure sign is negative else ACS at hostapd fails */
+ survey->noise = nf * -1;
+
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_RX;
+}
+
/*
* CMD_SET_RF_CHANNEL.
*/
@@ -3033,6 +3144,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&conf->chandef);
struct mwl8k_cmd_set_rf_channel *cmd;
+ struct mwl8k_priv *priv = hw->priv;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -3049,13 +3161,29 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000004);
- if (channel_type == NL80211_CHAN_NO_HT ||
- channel_type == NL80211_CHAN_HT20)
+ if (!priv->sw_scan_start) {
+ if (channel_type == NL80211_CHAN_NO_HT ||
+ channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
+ } else {
cmd->channel_flags |= cpu_to_le32(0x00000080);
- else if (channel_type == NL80211_CHAN_HT40MINUS)
- cmd->channel_flags |= cpu_to_le32(0x000001900);
- else if (channel_type == NL80211_CHAN_HT40PLUS)
- cmd->channel_flags |= cpu_to_le32(0x000000900);
+ }
+
+ if (priv->sw_scan_start) {
+ /* Store the current channel's stats
+ * before switching to the new one.
+ * This is processed only for AP firmware.
+ */
+ if (priv->channel_time != 0)
+ mwl8k_update_survey(priv, priv->acs_chan);
+
+ priv->channel_time = jiffies;
+ priv->acs_chan = channel;
+ }
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -5263,6 +5391,27 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
{
struct mwl8k_priv *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_supported_band *sband;
+
+ if (priv->ap_fw) {
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels)
+ return -ENOENT;
+
+ memcpy(survey, &priv->survey[idx], sizeof(*survey));
+ survey->channel = &sband->channels[idx];
+
+ return 0;
+ }
if (idx != 0)
return -ENOENT;
@@ -5406,6 +5555,40 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return rc;
}
+static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ u8 tmp;
+
+ if (!priv->ap_fw)
+ return;
+
+ /* clear all stats */
+ priv->channel_time = 0;
+ ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ ioread32(priv->regs + NOK_CCA_CNT_REG);
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+
+ priv->sw_scan_start = true;
+}
+
+static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ u8 tmp;
+
+ if (!priv->ap_fw)
+ return;
+
+ priv->sw_scan_start = false;
+
+ /* clear all stats */
+ priv->channel_time = 0;
+ ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+ ioread32(priv->regs + NOK_CCA_CNT_REG);
+ mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+}
+
static const struct ieee80211_ops mwl8k_ops = {
.tx = mwl8k_tx,
.start = mwl8k_start,
@@ -5424,6 +5607,8 @@ static const struct ieee80211_ops mwl8k_ops = {
.get_stats = mwl8k_get_stats,
.get_survey = mwl8k_get_survey,
.ampdu_action = mwl8k_ampdu_action,
+ .sw_scan_start = mwl8k_sw_scan_start,
+ .sw_scan_complete = mwl8k_sw_scan_complete,
};
static void mwl8k_finalize_join_worker(struct work_struct *work)
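The ACS support added above snapshots two hardware counters (busy and rx time, in microseconds) plus a jiffies dwell time for each channel visited during a software scan, then reports them through get_survey. A minimal sketch of filling one survey entry from such raw counters, with hypothetical parameter names:

static void fill_survey(struct survey_info *survey,
			struct ieee80211_channel *chan,
			u32 busy_us, u32 rx_us,
			unsigned long dwell_jiffies, u8 avg_noise)
{
	survey->channel = chan;
	survey->channel_time = jiffies_to_msecs(dwell_jiffies);
	survey->channel_time_busy = busy_us / 1000;	/* usecs to msecs */
	survey->channel_time_rx = rx_us / 1000;
	survey->noise = -(s8)avg_noise;		/* hostapd ACS expects dBm < 0 */
	survey->filled = SURVEY_INFO_NOISE_DBM |
			 SURVEY_INFO_CHANNEL_TIME |
			 SURVEY_INFO_CHANNEL_TIME_BUSY |
			 SURVEY_INFO_CHANNEL_TIME_RX;
}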
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index d01edd2c50c5..a9e94b6db5b7 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -59,7 +59,8 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
for (i = 0; i < NUM_CHANNELS; i++) {
if (priv->channel_mask & (1 << i)) {
priv->channels[i].center_freq =
- ieee80211_dsss_chan_to_freq(i + 1);
+ ieee80211_channel_to_frequency(i + 1,
+ IEEE80211_BAND_2GHZ);
channels++;
}
}
@@ -177,7 +178,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
if (chandef->chan->band != IEEE80211_BAND_2GHZ)
return -EINVAL;
- channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq);
+ channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
if ((channel < 1) || (channel > NUM_CHANNELS) ||
!(priv->channel_mask & (1 << (channel - 1))))
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index c09c8437c0b8..49300d04efdf 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
goto out;
}
- freq = ieee80211_dsss_chan_to_freq(channel);
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
out:
orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e8c5714bfd11..e175b9b8561b 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -110,7 +110,8 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
break;
}
- freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
+ freq = ieee80211_channel_to_frequency(
+ le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (!channel) {
printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -146,7 +147,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie_len = len - sizeof(*bss);
ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
- freq = ieee80211_dsss_chan_to_freq(chan);
+ freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 3b5508f982e8..b7a867b50b94 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -444,7 +444,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
for (i = 0; i < (6 - frq->e); i++)
denom *= 10;
- chan = ieee80211_freq_to_dsss_chan(frq->m / denom);
+ chan = ieee80211_frequency_to_channel(frq->m / denom);
}
if ((chan < 1) || (chan > NUM_CHANNELS) ||
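The orinoco conversions above replace the DSSS-specific helpers with the generic, band-aware ieee80211_channel_to_frequency() / ieee80211_frequency_to_channel() pair. For the 2.4 GHz band the mapping is simple enough to sketch directly (channel 6 maps to 2437 MHz, for example); the real helpers also cover the other bands:

static int chan_to_freq_2ghz(int chan)
{
	if (chan == 14)
		return 2484;		/* Japan-only channel 14 */
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;	/* 5 MHz spacing from 2412 MHz */
	return 0;			/* invalid channel */
}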
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6e635cfa24c8..043bd1c23c19 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -513,7 +513,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
if (!buf)
return -ENOMEM;
- left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size);
+ left = block_size = min_t(size_t, P54U_FW_BLOCK, priv->fw->size);
strcpy(buf, p54u_firmware_upload_3887);
left -= strlen(p54u_firmware_upload_3887);
tmp += strlen(p54u_firmware_upload_3887);
@@ -1053,6 +1053,10 @@ static int p54u_probe(struct usb_interface *intf,
priv->upload_fw = p54u_upload_firmware_net2280;
}
err = p54u_load_firmware(dev, intf);
+ if (err) {
+ usb_put_dev(udev);
+ p54_free_common(dev);
+ }
return err;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 78fa64d3f223..ecbb0546cf3e 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -644,7 +644,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
if (wpa_ie_len > 0) {
iwe.cmd = IWEVGENIE;
- iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
+ iwe.u.data.length = min_t(size_t, wpa_ie_len, MAX_WPA_IE_LEN);
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, wpa_ie);
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5028557aa18a..39d22a154341 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1290,7 +1290,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
if (is_associated(usbdev))
return 0;
- dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000;
+ dsconfig = 1000 *
+ ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
len = sizeof(config);
ret = rndis_query_oid(usbdev,
@@ -2835,7 +2836,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(usbdev->net, bssid,
+ get_current_channel(usbdev, NULL),
+ GFP_KERNEL);
kfree(info);
diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig
new file mode 100644
index 000000000000..35245f994c10
--- /dev/null
+++ b/drivers/net/wireless/rsi/Kconfig
@@ -0,0 +1,30 @@
+config RSI_91X
+ tristate "Redpine Signals Inc 91x WLAN driver support"
+ depends on MAC80211
+ ---help---
+ This option enables support for RSI 1x1 devices.
+ Select M (recommended), if you have a RSI 1x1 wireless module.
+
+config RSI_DEBUGFS
+ bool "Redpine Signals Inc debug support"
+ depends on RSI_91X
+ default y
+ ---help---
+ Say Y, if you would like to enable debug support. This option
+ creates debugfs entries
+
+config RSI_SDIO
+ tristate "Redpine Signals SDIO bus support"
+ depends on MMC && RSI_91X
+ default m
+ ---help---
+ This option enables the SDIO bus support in rsi drivers.
+ Select M (recommended), if you have a RSI 1x1 wireless module.
+
+config RSI_USB
+ tristate "Redpine Signals USB bus support"
+ depends on USB && RSI_91X
+ default m
+ ---help---
+ This option enables the USB bus support in rsi drivers.
+ Select M (recommended), if you have a RSI 1x1 wireless module.
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
new file mode 100644
index 000000000000..25828b692756
--- /dev/null
+++ b/drivers/net/wireless/rsi/Makefile
@@ -0,0 +1,12 @@
+rsi_91x-y += rsi_91x_main.o
+rsi_91x-y += rsi_91x_core.o
+rsi_91x-y += rsi_91x_mac80211.o
+rsi_91x-y += rsi_91x_mgmt.o
+rsi_91x-y += rsi_91x_pkt.o
+rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o
+
+rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o
+rsi_sdio-y += rsi_91x_sdio.o rsi_91x_sdio_ops.o
+obj-$(CONFIG_RSI_91X) += rsi_91x.o
+obj-$(CONFIG_RSI_SDIO) += rsi_sdio.o
+obj-$(CONFIG_RSI_USB) += rsi_usb.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
new file mode 100644
index 000000000000..e89535e86caf
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -0,0 +1,342 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_determine_min_weight_queue() - This function determines the queue with
+ * the min weight.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number.
+ */
+static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
+{
+ struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+ u32 q_len = 0;
+ u8 ii = 0;
+
+ for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+ if ((tx_qinfo[ii].pkt_contended) && q_len) {
+ common->min_weight = tx_qinfo[ii].weight;
+ break;
+ }
+ }
+ return ii;
+}
+
+/**
+ * rsi_recalculate_weights() - This function recalculates the weights
+ * corresponding to each queue.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: true if any queue was freshly marked for contention, false otherwise.
+ */
+static bool rsi_recalculate_weights(struct rsi_common *common)
+{
+ struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+ bool recontend_queue = false;
+ u8 ii = 0;
+ u32 q_len = 0;
+
+ for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+ /* Check for the need of contention */
+ if (q_len) {
+ if (tx_qinfo[ii].pkt_contended) {
+ tx_qinfo[ii].weight =
+ ((tx_qinfo[ii].weight > common->min_weight) ?
+ tx_qinfo[ii].weight - common->min_weight : 0);
+ } else {
+ tx_qinfo[ii].pkt_contended = 1;
+ tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
+ recontend_queue = true;
+ }
+ } else { /* No packets so no contention */
+ tx_qinfo[ii].weight = 0;
+ tx_qinfo[ii].pkt_contended = 0;
+ }
+ }
+
+ return recontend_queue;
+}
+
+/**
+ * rsi_core_determine_hal_queue() - This function determines the queue from
+ * which packet has to be dequeued.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number on success.
+ */
+static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
+{
+ bool recontend_queue = false;
+ u32 q_len = 0;
+ u8 q_num = INVALID_QUEUE;
+ u8 ii, min = 0;
+
+ if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
+ if (!common->mgmt_q_block)
+ q_num = MGMT_SOFT_Q;
+ return q_num;
+ }
+
+ if (common->pkt_cnt != 0) {
+ --common->pkt_cnt;
+ return common->selected_qnum;
+ }
+
+get_queue_num:
+ q_num = 0;
+ recontend_queue = false;
+
+ q_num = rsi_determine_min_weight_queue(common);
+ ii = q_num;
+ q_len = skb_queue_len(&common->tx_queue[ii]);
+
+ /* Selecting the queue with least back off */
+ for (; ii < NUM_EDCA_QUEUES; ii++) {
+ if (((common->tx_qinfo[ii].pkt_contended) &&
+ (common->tx_qinfo[ii].weight < min)) && q_len) {
+ min = common->tx_qinfo[ii].weight;
+ q_num = ii;
+ }
+ }
+
+ common->tx_qinfo[q_num].pkt_contended = 0;
+ /* Adjust the back off values for all queues again */
+ recontend_queue = rsi_recalculate_weights(common);
+
+ q_len = skb_queue_len(&common->tx_queue[q_num]);
+ if (!q_len) {
+ /* If any queues are freshly contended and the selected queue
+ * doesn't have any packets
+ * then get the queue number again with fresh values
+ */
+ if (recontend_queue)
+ goto get_queue_num;
+
+ q_num = INVALID_QUEUE;
+ return q_num;
+ }
+
+ common->selected_qnum = q_num;
+ q_len = skb_queue_len(&common->tx_queue[q_num]);
+
+ switch (common->selected_qnum) {
+ case VO_Q:
+ if (q_len > MAX_CONTINUOUS_VO_PKTS)
+ common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
+ else
+ common->pkt_cnt = --q_len;
+ break;
+
+ case VI_Q:
+ if (q_len > MAX_CONTINUOUS_VI_PKTS)
+ common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
+ else
+ common->pkt_cnt = --q_len;
+
+ break;
+
+ default:
+ common->pkt_cnt = 0;
+ break;
+ }
+
+ return q_num;
+}
+
+/**
+ * rsi_core_queue_pkt() - This function enqueues the packet to the queue
+ * specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+static void rsi_core_queue_pkt(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ u8 q_num = skb->priority;
+ if (q_num >= NUM_SOFT_QUEUES) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+ __func__, q_num);
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_queue_tail(&common->tx_queue[q_num], skb);
+}
+
+/**
+ * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
+ * specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @q_num: Queue number.
+ *
+ * Return: Pointer to sk_buff structure.
+ */
+static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
+ u8 q_num)
+{
+ if (q_num >= NUM_SOFT_QUEUES) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+ __func__, q_num);
+ return NULL;
+ }
+
+ return skb_dequeue(&common->tx_queue[q_num]);
+}
+
+/**
+ * rsi_core_qos_processor() - This function is used to determine the wmm queue
+ * based on the backoff procedure. Data packets are
+ * dequeued from the selected hal queue and sent to
+ * the below layers.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_core_qos_processor(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct sk_buff *skb;
+ unsigned long tstamp_1, tstamp_2;
+ u8 q_num;
+ int status;
+
+ tstamp_1 = jiffies;
+ while (1) {
+ q_num = rsi_core_determine_hal_queue(common);
+ rsi_dbg(DATA_TX_ZONE,
+ "%s: Queue number = %d\n", __func__, q_num);
+
+ if (q_num == INVALID_QUEUE) {
+ rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
+ break;
+ }
+
+ mutex_lock(&common->tx_rxlock);
+
+ status = adapter->check_hw_queue_status(adapter, q_num);
+ if ((status <= 0)) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ if ((q_num < MGMT_SOFT_Q) &&
+ ((skb_queue_len(&common->tx_queue[q_num])) <=
+ MIN_DATA_QUEUE_WATER_MARK)) {
+ if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+ ieee80211_wake_queue(adapter->hw,
+ WME_AC(q_num));
+ }
+
+ skb = rsi_core_dequeue_pkt(common, q_num);
+ if (skb == NULL) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ if (q_num == MGMT_SOFT_Q)
+ status = rsi_send_mgmt_pkt(common, skb);
+ else
+ status = rsi_send_data_pkt(common, skb);
+
+ if (status) {
+ mutex_unlock(&common->tx_rxlock);
+ break;
+ }
+
+ common->tx_stats.total_tx_pkt_send[q_num]++;
+
+ tstamp_2 = jiffies;
+ mutex_unlock(&common->tx_rxlock);
+
+ if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
+ schedule();
+ }
+}
+
+/**
+ * rsi_core_xmit() - This function transmits the packets received from mac80211
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
+ struct ieee80211_hdr *tmp_hdr = NULL;
+ u8 q_num, tid = 0;
+
+ if ((!skb) || (!skb->len)) {
+ rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
+ __func__);
+ goto xmit_fail;
+ }
+ info = IEEE80211_SKB_CB(skb);
+ tx_params = (struct skb_info *)info->driver_data;
+ tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+
+ if (common->fsm_state != FSM_MAC_INIT_DONE) {
+ rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
+ goto xmit_fail;
+ }
+
+ if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
+ (ieee80211_is_ctl(tmp_hdr->frame_control))) {
+ q_num = MGMT_SOFT_Q;
+ skb->priority = q_num;
+ } else {
+ if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+ tid = (skb->data[24] & IEEE80211_QOS_TID);
+ skb->priority = TID_TO_WME_AC(tid);
+ } else {
+ tid = IEEE80211_NONQOS_TID;
+ skb->priority = BE_Q;
+ }
+ q_num = skb->priority;
+ tx_params->tid = tid;
+ tx_params->sta_id = 0;
+ }
+
+ if ((q_num != MGMT_SOFT_Q) &&
+ ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
+ DATA_QUEUE_WATER_MARK)) {
+ if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+ ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
+ rsi_set_event(&common->tx_thread.event);
+ goto xmit_fail;
+ }
+
+ rsi_core_queue_pkt(common, skb);
+ rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
+ rsi_set_event(&common->tx_thread.event);
+
+ return;
+
+xmit_fail:
+ rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
+ /* Dropping pkt here */
+ ieee80211_free_txskb(common->priv->hw, skb);
+}
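rsi_core_determine_hal_queue() and rsi_recalculate_weights() above implement a weighted back-off between the EDCA software queues: each contending, non-empty queue carries a weight seeded from its WME parameters, the smallest weight wins, that amount is subtracted from the other contenders, and the winner re-contends with a fresh weight once it has been served. A plain-array sketch of that selection step; the driver keeps this state in common->tx_qinfo[]:

static int pick_queue(u32 *weight, const u32 *qlen, bool *contended, int n)
{
	u32 min = U32_MAX;
	int i, winner = -1;

	for (i = 0; i < n; i++) {
		if (contended[i] && qlen[i] && weight[i] < min) {
			min = weight[i];
			winner = i;
		}
	}

	if (winner < 0)
		return -1;			/* nothing to send */

	for (i = 0; i < n; i++) {
		if (i != winner && contended[i] && qlen[i])
			weight[i] -= min;
	}

	contended[winner] = false;		/* will re-contend later */
	return winner;
}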
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
new file mode 100644
index 000000000000..7e4ef4554411
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -0,0 +1,339 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_debugfs.h"
+#include "rsi_sdio.h"
+
+/**
+ * rsi_sdio_stats_read() - This function returns the sdio status of the driver.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_stats_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ seq_printf(seq, "total_sdio_interrupts: %d\n",
+ dev->rx_info.sdio_int_counter);
+ seq_printf(seq, "sdio_msdu_pending_intr_count: %d\n",
+ dev->rx_info.total_sdio_msdu_pending_intr);
+ seq_printf(seq, "sdio_buff_full_count : %d\n",
+ dev->rx_info.buf_full_counter);
+ seq_printf(seq, "sdio_buf_semi_full_count %d\n",
+ dev->rx_info.buf_semi_full_counter);
+ seq_printf(seq, "sdio_unknown_intr_count: %d\n",
+ dev->rx_info.total_sdio_unknown_intr);
+ /* RX Path Stats */
+ seq_printf(seq, "BUFFER FULL STATUS : %d\n",
+ dev->rx_info.buffer_full);
+ seq_printf(seq, "SEMI BUFFER FULL STATUS : %d\n",
+ dev->rx_info.semi_buffer_full);
+ seq_printf(seq, "MGMT BUFFER FULL STATUS : %d\n",
+ dev->rx_info.mgmt_buffer_full);
+ seq_printf(seq, "BUFFER FULL COUNTER : %d\n",
+ dev->rx_info.buf_full_counter);
+ seq_printf(seq, "BUFFER SEMI FULL COUNTER : %d\n",
+ dev->rx_info.buf_semi_full_counter);
+ seq_printf(seq, "MGMT BUFFER FULL COUNTER : %d\n",
+ dev->rx_info.mgmt_buf_full_counter);
+
+ return 0;
+}
+
+/**
+ * rsi_sdio_stats_open() - This function calls the single_open function of
+ * seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_sdio_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_sdio_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_version_read() - This function gives driver and firmware version number.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_version_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+
+ common->driver_ver.major = 0;
+ common->driver_ver.minor = 1;
+ common->driver_ver.release_num = 0;
+ common->driver_ver.patch_num = 0;
+ seq_printf(seq, "Driver : %x.%d.%d.%d\nLMAC : %d.%d.%d.%d\n",
+ common->driver_ver.major,
+ common->driver_ver.minor,
+ common->driver_ver.release_num,
+ common->driver_ver.patch_num,
+ common->fw_ver.major,
+ common->fw_ver.minor,
+ common->fw_ver.release_num,
+ common->fw_ver.patch_num);
+ return 0;
+}
+
+/**
+ * rsi_version_open() - This function calls the single_open function of
+ * seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_version_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_version_read, inode->i_private);
+}
+
+/**
+ * rsi_stats_read() - This function reports the driver FSM state and the TX
+ *		      path statistics.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_stats_read(struct seq_file *seq, void *data)
+{
+ struct rsi_common *common = seq->private;
+
+ unsigned char fsm_state[][32] = {
+ "FSM_CARD_NOT_READY",
+ "FSM_BOOT_PARAMS_SENT",
+ "FSM_EEPROM_READ_MAC_ADDR",
+ "FSM_RESET_MAC_SENT",
+ "FSM_RADIO_CAPS_SENT",
+ "FSM_BB_RF_PROG_SENT",
+ "FSM_MAC_INIT_DONE"
+ };
+ seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n");
+ seq_puts(seq, "DRIVER_FSM_STATE: ");
+
+ if (common->fsm_state <= FSM_MAC_INIT_DONE)
+ seq_printf(seq, "%s", fsm_state[common->fsm_state]);
+
+ seq_printf(seq, "(%d)\n\n", common->fsm_state);
+
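+	/*
+	 * Soft queue indices: tx_queue[0..3] are the VO/VI/BE/BK data queues
+	 * and tx_queue[4] is the management soft queue (MGMT_SOFT_Q).
+	 */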
+ /* Mgmt TX Path Stats */
+ seq_printf(seq, "total_mgmt_pkt_send : %d\n",
+ common->tx_stats.total_tx_pkt_send[MGMT_SOFT_Q]);
+ seq_printf(seq, "total_mgmt_pkt_queued : %d\n",
+ skb_queue_len(&common->tx_queue[4]));
+ seq_printf(seq, "total_mgmt_pkt_freed : %d\n",
+ common->tx_stats.total_tx_pkt_freed[MGMT_SOFT_Q]);
+
+ /* Data TX Path Stats */
+ seq_printf(seq, "total_data_vo_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[VO_Q]);
+ seq_printf(seq, "total_data_vo_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[0]));
+ seq_printf(seq, "total_vo_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[VO_Q]);
+ seq_printf(seq, "total_data_vi_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[VI_Q]);
+ seq_printf(seq, "total_data_vi_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[1]));
+ seq_printf(seq, "total_vi_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[VI_Q]);
+ seq_printf(seq, "total_data_be_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[BE_Q]);
+ seq_printf(seq, "total_data_be_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[2]));
+ seq_printf(seq, "total_be_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[BE_Q]);
+ seq_printf(seq, "total_data_bk_pkt_send: %8d\t",
+ common->tx_stats.total_tx_pkt_send[BK_Q]);
+ seq_printf(seq, "total_data_bk_pkt_queued: %8d\t",
+ skb_queue_len(&common->tx_queue[3]));
+ seq_printf(seq, "total_bk_pkt_freed: %8d\n",
+ common->tx_stats.total_tx_pkt_freed[BK_Q]);
+
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+/**
+ * rsi_stats_open() - This function calls single_open() of seq_file to
+ *		      open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_read() - This function displays the currently enabled debug
+ *			   zones.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_debug_zone_read(struct seq_file *seq, void *data)
+{
+ rsi_dbg(FSM_ZONE, "%x: rsi_enabled zone", rsi_zone_enabled);
+ seq_printf(seq, "The zones available are %#x\n",
+ rsi_zone_enabled);
+ return 0;
+}
+
+/**
+ * rsi_debug_read() - This function calls single_open() of seq_file to
+ *		      open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_debug_read(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, rsi_debug_zone_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_write() - This function updates the set of enabled debug
+ *			    zones from user input.
+ * @filp: Pointer to the file structure.
+ * @buff: Pointer to the character buffer.
+ * @len: Length of the data to be written into buffer.
+ * @data: Pointer to the data.
+ *
+ * Return: Number of bytes written on success, negative error code on failure.
+ */
+static ssize_t rsi_debug_zone_write(struct file *filp,
+ const char __user *buff,
+ size_t len,
+ loff_t *data)
+{
+ unsigned long dbg_zone;
+ int ret;
+
+ if (!len)
+ return 0;
+
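+	/* The zone mask is supplied by the user as a hexadecimal bitmap. */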
+ ret = kstrtoul_from_user(buff, len, 16, &dbg_zone);
+
+ if (ret)
+ return ret;
+
+ rsi_zone_enabled = dbg_zone;
+ return len;
+}
+
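+/*
+ * Helpers to build the debugfs file_operations: FOPS() for read-only
+ * seq_file entries, FOPS_RW() when a write handler is needed as well.
+ */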
+#define FOPS(fopen) { \
+	.owner = THIS_MODULE, \
+	.open = (fopen), \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.release = single_release, \
+}
+
+#define FOPS_RW(fopen, fwrite) { \
+	.owner = THIS_MODULE, \
+	.open = (fopen), \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+	.write = (fwrite), \
+	.release = single_release, \
+}
+
+static const struct rsi_dbg_files dev_debugfs_files[] = {
+ {"version", 0644, FOPS(rsi_version_open),},
+ {"stats", 0644, FOPS(rsi_stats_open),},
+ {"debug_zone", 0666, FOPS_RW(rsi_debug_read, rsi_debug_zone_write),},
+ {"sdio_stats", 0644, FOPS(rsi_sdio_stats_open),},
+};
+
+/**
+ * rsi_init_dbgfs() - This function initializes the dbgfs entry.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_debugfs *dev_dbgfs;
+ char devdir[6];
+ int ii;
+ const struct rsi_dbg_files *files;
+
+	dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
+	if (!dev_dbgfs)
+		return -ENOMEM;
+	adapter->dfsentry = dev_dbgfs;
+
+ snprintf(devdir, sizeof(devdir), "%s",
+ wiphy_name(adapter->hw->wiphy));
+ dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
+
+ if (IS_ERR(dev_dbgfs->subdir)) {
+ if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
+ rsi_dbg(ERR_ZONE,
+ "%s:Debugfs has not been mounted\n", __func__);
+ else
+ rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
+
+ adapter->dfsentry = NULL;
+ kfree(dev_dbgfs);
+ return (int)PTR_ERR(dev_dbgfs->subdir);
+ } else {
+ for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
+ files = &dev_debugfs_files[ii];
+ dev_dbgfs->rsi_files[ii] =
+ debugfs_create_file(files->name,
+ files->perms,
+ dev_dbgfs->subdir,
+ common,
+ &files->fops);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rsi_init_dbgfs);
+
+/**
+ * rsi_remove_dbgfs() - This function removes the previously created debugfs
+ *			entries.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+ struct rsi_debugfs *dev_dbgfs = adapter->dfsentry;
+
+ if (!dev_dbgfs)
+ return;
+
+ debugfs_remove_recursive(dev_dbgfs->subdir);
+}
+EXPORT_SYMBOL_GPL(rsi_remove_dbgfs);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
new file mode 100644
index 000000000000..84164747ace0
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -0,0 +1,1008 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_debugfs.h"
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+static const struct ieee80211_channel rsi_2ghz_channels[] = {
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ .hw_value = 1 }, /* Channel 1 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ .hw_value = 2 }, /* Channel 2 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ .hw_value = 3 }, /* Channel 3 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ .hw_value = 4 }, /* Channel 4 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ .hw_value = 5 }, /* Channel 5 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ .hw_value = 6 }, /* Channel 6 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ .hw_value = 7 }, /* Channel 7 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ .hw_value = 8 }, /* Channel 8 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ .hw_value = 9 }, /* Channel 9 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ .hw_value = 10 }, /* Channel 10 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ .hw_value = 11 }, /* Channel 11 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ .hw_value = 12 }, /* Channel 12 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ .hw_value = 13 }, /* Channel 13 */
+ { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ .hw_value = 14 }, /* Channel 14 */
+};
+
+static const struct ieee80211_channel rsi_5ghz_channels[] = {
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+ .hw_value = 36, }, /* Channel 36 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+ .hw_value = 40, }, /* Channel 40 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+ .hw_value = 44, }, /* Channel 44 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+ .hw_value = 48, }, /* Channel 48 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+ .hw_value = 52, }, /* Channel 52 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+ .hw_value = 56, }, /* Channel 56 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+ .hw_value = 60, }, /* Channel 60 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+ .hw_value = 64, }, /* Channel 64 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+ .hw_value = 100, }, /* Channel 100 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+ .hw_value = 104, }, /* Channel 104 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+ .hw_value = 108, }, /* Channel 108 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+ .hw_value = 112, }, /* Channel 112 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+ .hw_value = 116, }, /* Channel 116 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+ .hw_value = 120, }, /* Channel 120 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+ .hw_value = 124, }, /* Channel 124 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+ .hw_value = 128, }, /* Channel 128 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+ .hw_value = 132, }, /* Channel 132 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+ .hw_value = 136, }, /* Channel 136 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+ .hw_value = 140, }, /* Channel 140 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+ .hw_value = 149, }, /* Channel 149 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+ .hw_value = 153, }, /* Channel 153 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+ .hw_value = 157, }, /* Channel 157 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+ .hw_value = 161, }, /* Channel 161 */
+ { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+ .hw_value = 165, }, /* Channel 165 */
+};
+
+struct ieee80211_rate rsi_rates[12] = {
+ { .bitrate = STD_RATE_01 * 5, .hw_value = RSI_RATE_1 },
+ { .bitrate = STD_RATE_02 * 5, .hw_value = RSI_RATE_2 },
+ { .bitrate = STD_RATE_5_5 * 5, .hw_value = RSI_RATE_5_5 },
+ { .bitrate = STD_RATE_11 * 5, .hw_value = RSI_RATE_11 },
+ { .bitrate = STD_RATE_06 * 5, .hw_value = RSI_RATE_6 },
+ { .bitrate = STD_RATE_09 * 5, .hw_value = RSI_RATE_9 },
+ { .bitrate = STD_RATE_12 * 5, .hw_value = RSI_RATE_12 },
+ { .bitrate = STD_RATE_18 * 5, .hw_value = RSI_RATE_18 },
+ { .bitrate = STD_RATE_24 * 5, .hw_value = RSI_RATE_24 },
+ { .bitrate = STD_RATE_36 * 5, .hw_value = RSI_RATE_36 },
+ { .bitrate = STD_RATE_48 * 5, .hw_value = RSI_RATE_48 },
+ { .bitrate = STD_RATE_54 * 5, .hw_value = RSI_RATE_54 },
+};
+
+const u16 rsi_mcsrates[8] = {
+ RSI_RATE_MCS0, RSI_RATE_MCS1, RSI_RATE_MCS2, RSI_RATE_MCS3,
+ RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7
+};
+
+/**
+ * rsi_is_cipher_wep() - This function determines if the cipher is WEP or not.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: true if the group cipher is WEP and no pairwise cipher is set,
+ *	   false otherwise.
+ */
+bool rsi_is_cipher_wep(struct rsi_common *common)
+{
+ if (((common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP104) ||
+ (common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP40)) &&
+ (!common->secinfo.ptk_cipher))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * rsi_register_rates_channels() - This function registers channels and rates.
+ * @adapter: Pointer to the adapter structure.
+ * @band: Operating band to be set.
+ *
+ * Return: None.
+ */
+static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+{
+ struct ieee80211_supported_band *sbands = &adapter->sbands[band];
+ void *channels = NULL;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
+ memcpy(channels,
+ rsi_2ghz_channels,
+ sizeof(rsi_2ghz_channels));
+ sbands->band = IEEE80211_BAND_2GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
+ sbands->bitrates = rsi_rates;
+ sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
+ } else {
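+		/*
+		 * 5 GHz: skip the four 11b entries at the start of rsi_rates;
+		 * only the OFDM rates apply on this band.
+		 */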
+ channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
+ memcpy(channels,
+ rsi_5ghz_channels,
+ sizeof(rsi_5ghz_channels));
+ sbands->band = IEEE80211_BAND_5GHZ;
+ sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
+ sbands->bitrates = &rsi_rates[4];
+ sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
+ }
+
+ sbands->channels = channels;
+
+ memset(&sbands->ht_cap, 0, sizeof(struct ieee80211_sta_ht_cap));
+ sbands->ht_cap.ht_supported = true;
+ sbands->ht_cap.cap = (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+ sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
+ sbands->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ sbands->ht_cap.mcs.rx_mask[0] = 0xff;
+ sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ /* sbands->ht_cap.mcs.rx_highest = 0x82; */
+}
+
+/**
+ * rsi_mac80211_detach() - This function is used to de-initialize the
+ *			   mac80211 stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_mac80211_detach(struct rsi_hw *adapter)
+{
+ struct ieee80211_hw *hw = adapter->hw;
+
+ if (hw) {
+ ieee80211_stop_queues(hw);
+ ieee80211_unregister_hw(hw);
+ ieee80211_free_hw(hw);
+ }
+
+ rsi_remove_dbgfs(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_mac80211_detach);
+
+/**
+ * rsi_indicate_tx_status() - This function indicates the transmit status.
+ * @adapter: Pointer to the adapter structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @status: TX completion status (0 indicates the frame was acked).
+ *
+ * Return: None.
+ */
+void rsi_indicate_tx_status(struct rsi_hw *adapter,
+ struct sk_buff *skb,
+ int status)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ if (!status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(adapter->hw, skb);
+}
+
+/**
+ * rsi_mac80211_tx() - This is the handler that the 802.11 module calls for
+ *		       each transmitted frame. The SKB contains the buffer
+ *		       starting from the IEEE 802.11 header.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @control: Pointer to the ieee80211_tx_control structure
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None
+ */
+static void rsi_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ rsi_core_xmit(common, skb);
+}
+
+/**
+ * rsi_mac80211_start() - This is the first handler that the 802.11 module
+ *			  calls. Since the driver init is complete by then,
+ *			  it simply marks the interface as up.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: 0 as success.
+ */
+static int rsi_mac80211_start(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->iface_down = false;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_stop(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->iface_down = true;
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_add_interface() - This function is called when a netdevice
+ * attached to the hardware is enabled.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: ret: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&common->mutex);
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (!adapter->sc_nvifs) {
+ ++adapter->sc_nvifs;
+ adapter->vifs[0] = vif;
+ ret = rsi_set_vap_capabilities(common, STA_OPMODE);
+ }
+ break;
+ default:
+ rsi_dbg(ERR_ZONE,
+ "%s: Interface type %d not supported\n", __func__,
+ vif->type);
+ }
+ mutex_unlock(&common->mutex);
+
+ return ret;
+}
+
+/**
+ * rsi_mac80211_remove_interface() - This function notifies driver that an
+ * interface is going down.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ adapter->sc_nvifs--;
+
+ if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
+ adapter->vifs[0] = NULL;
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_config() - This function is a handler for configuration
+ * requests. The stack calls this function to
+ * change hardware configuration, e.g., channel.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed: Changed flags set.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ u32 changed)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ int status = -EOPNOTSUPP;
+
+ mutex_lock(&common->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+ u16 channel = curchan->hw_value;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Set channel: %d MHz type: %d channel_no %d\n",
+ __func__, curchan->center_freq,
+ curchan->flags, channel);
+ common->band = curchan->band;
+ status = rsi_set_channel(adapter->priv, channel);
+ }
+ mutex_unlock(&common->mutex);
+
+ return status;
+}
+
+/**
+ * rsi_get_connected_channel() - This function is used to get the current
+ * connected channel number.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: Channel number of the connected AP, or 0 if not connected.
+ */
+u16 rsi_get_connected_channel(struct rsi_hw *adapter)
+{
+ struct ieee80211_vif *vif = adapter->vifs[0];
+ if (vif) {
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+ struct ieee80211_channel *channel = bss->chandef.chan;
+ return channel->hw_value;
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_bss_info_changed() - This function is a handler for config
+ * requests related to BSS parameters that
+ * may vary during BSS's lifespan.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @bss_conf: Pointer to the ieee80211_bss_conf structure.
+ * @changed: Changed flags set.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ if (changed & BSS_CHANGED_ASSOC) {
+ rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
+ __func__, bss_conf->assoc);
+ rsi_inform_bss_status(common,
+ bss_conf->assoc,
+ bss_conf->bssid,
+ bss_conf->qos,
+ bss_conf->aid);
+ }
+ mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_conf_filter() - This function configures the device's RX
+ *				filter.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed_flags: Changed flags set.
+ * @total_flags: Total initial flags set.
+ * @multicast: Multicast filter.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
+ u32 changed_flags,
+ u32 *total_flags,
+ u64 multicast)
+{
+ /* Not doing much here as of now */
+ *total_flags &= RSI_SUPP_FILTERS;
+}
+
+/**
+ * rsi_mac80211_conf_tx() - This function configures TX queue parameters
+ * (EDCF (aifs, cw_min, cw_max), bursting)
+ * for a hardware TX queue.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @queue: Queue number.
+ * @params: Pointer to ieee80211_tx_queue_params structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ u8 idx = 0;
+
+ if (queue >= IEEE80211_NUM_ACS)
+ return 0;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Conf queue %d, aifs: %d, cwmin: %d cwmax: %d, txop: %d\n",
+ __func__, queue, params->aifs,
+ params->cw_min, params->cw_max, params->txop);
+
+ mutex_lock(&common->mutex);
+ /* Map into the way the f/w expects */
+ switch (queue) {
+ case IEEE80211_AC_VO:
+ idx = VO_Q;
+ break;
+ case IEEE80211_AC_VI:
+ idx = VI_Q;
+ break;
+ case IEEE80211_AC_BE:
+ idx = BE_Q;
+ break;
+ case IEEE80211_AC_BK:
+ idx = BK_Q;
+ break;
+ default:
+ idx = BE_Q;
+ break;
+ }
+
+ memcpy(&common->edca_params[idx],
+ params,
+ sizeof(struct ieee80211_tx_queue_params));
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_hal_key_config() - This function loads the keys into the firmware.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_hal_key_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key)
+{
+ struct rsi_hw *adapter = hw->priv;
+ int status;
+ u8 key_type;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ key_type = RSI_PAIRWISE_KEY;
+ else
+ key_type = RSI_GROUP_KEY;
+
+ rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n",
+ __func__, key->cipher, key_type, key->keylen);
+
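+	/*
+	 * WEP keys are loaded twice: first as a pairwise key and then again
+	 * with the requested key type.
+	 */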
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) ||
+ (key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
+ status = rsi_hal_load_key(adapter->priv,
+ key->key,
+ key->keylen,
+ RSI_PAIRWISE_KEY,
+ key->keyidx,
+ key->cipher);
+ if (status)
+ return status;
+ }
+ return rsi_hal_load_key(adapter->priv,
+ key->key,
+ key->keylen,
+ key_type,
+ key->keyidx,
+ key->cipher);
+}
+
+/**
+ * rsi_mac80211_set_key() - This function sets type of key to be loaded.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @cmd: enum set_key_cmd.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: status: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct security_info *secinfo = &common->secinfo;
+ int status;
+
+ mutex_lock(&common->mutex);
+ switch (cmd) {
+ case SET_KEY:
+ secinfo->security_enable = true;
+ status = rsi_hal_key_config(hw, vif, key);
+ if (status) {
+ mutex_unlock(&common->mutex);
+ return status;
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ secinfo->ptk_cipher = key->cipher;
+ else
+ secinfo->gtk_cipher = key->cipher;
+
+ key->hw_key_idx = key->keyidx;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ rsi_dbg(ERR_ZONE, "%s: RSI set_key\n", __func__);
+ break;
+
+ case DISABLE_KEY:
+ secinfo->security_enable = false;
+ rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
+ memset(key, 0, sizeof(struct ieee80211_key_conf));
+ status = rsi_hal_key_config(hw, vif, key);
+ break;
+
+ default:
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&common->mutex);
+ return status;
+}
+
+/**
+ * rsi_mac80211_ampdu_action() - This function handles the requested AMPDU
+ *				 MLME action and informs the firmware
+ *				 accordingly.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @action: ieee80211_ampdu_mlme_action enum.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @tid: Traffic identifier.
+ * @ssn: Pointer to ssn value.
+ * @buf_size: Buffer size (for kernel version > 2.6.38).
+ *
+ * Return: status: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ unsigned short tid,
+ unsigned short *ssn,
+ unsigned char buf_size)
+{
+ int status = -EOPNOTSUPP;
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ u16 seq_no = 0;
+ u8 ii = 0;
+
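+	/*
+	 * Find the index of this vif so that its per-vif sequence start can
+	 * be tracked for the TX block-ack session.
+	 */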
+ for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
+ if (vif == adapter->vifs[ii])
+ break;
+ }
+
+ mutex_lock(&common->mutex);
+ rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action);
+ if (ssn != NULL)
+ seq_no = *ssn;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ seq_no,
+ buf_size,
+ STA_RX_ADDBA_DONE);
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ 0,
+ buf_size,
+ STA_RX_DELBA);
+ break;
+
+ case IEEE80211_AMPDU_TX_START:
+ common->vif_info[ii].seq_start = seq_no;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ seq_no,
+ buf_size,
+ STA_TX_DELBA);
+ if (!status)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ status = rsi_send_aggregation_params_frame(common,
+ tid,
+ common->vif_info[ii]
+ .seq_start,
+ buf_size,
+ STA_TX_ADDBA_DONE);
+ break;
+
+ default:
+		rsi_dbg(ERR_ZONE, "%s: Unknown AMPDU action\n", __func__);
+ break;
+ }
+
+ mutex_unlock(&common->mutex);
+ return status;
+}
+
+/**
+ * rsi_mac80211_set_rts_threshold() - This function sets the RTS threshold.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @value: RTS threshold value.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rts_threshold(struct ieee80211_hw *hw,
+ u32 value)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ common->rts_threshold = value;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_set_rate_mask() - This function sets bitrate_mask to be used.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @mask: Pointer to the cfg80211_bitrate_mask structure.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] = 0;
+
+ if (mask->control[IEEE80211_BAND_2GHZ].legacy == 0xfff) {
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+ (mask->control[IEEE80211_BAND_2GHZ].ht_mcs[0] << 12);
+ } else {
+ common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+ mask->control[IEEE80211_BAND_2GHZ].legacy;
+ }
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_fill_rx_status() - This function fills rx status in
+ * ieee80211_rx_status structure.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @common: Pointer to the driver private structure.
+ * @rxs: Pointer to the ieee80211_rx_status structure.
+ *
+ * Return: None.
+ */
+static void rsi_fill_rx_status(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rsi_common *common,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct skb_info *rx_params = (struct skb_info *)info->driver_data;
+ struct ieee80211_hdr *hdr;
+ char rssi = rx_params->rssi;
+ u8 hdrlen = 0;
+ u8 channel = rx_params->channel;
+ s32 freq;
+
+ hdr = ((struct ieee80211_hdr *)(skb->data));
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ memset(info, 0, sizeof(struct ieee80211_tx_info));
+
+ rxs->signal = -(rssi);
+
+ if (channel <= 14)
+ rxs->band = IEEE80211_BAND_2GHZ;
+ else
+ rxs->band = IEEE80211_BAND_5GHZ;
+
+ freq = ieee80211_channel_to_frequency(channel, rxs->band);
+
+ if (freq)
+ rxs->freq = freq;
+
+ if (ieee80211_has_protected(hdr->frame_control)) {
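+		/*
+		 * The firmware leaves the security header in place: strip the
+		 * 4-byte WEP IV, or 8 bytes for TKIP/CCMP, before handing the
+		 * frame to mac80211.
+		 */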
+ if (rsi_is_cipher_wep(common)) {
+ memmove(skb->data + 4, skb->data, hdrlen);
+ skb_pull(skb, 4);
+ } else {
+ memmove(skb->data + 8, skb->data, hdrlen);
+ skb_pull(skb, 8);
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ rxs->flag |= RX_FLAG_IV_STRIPPED;
+ }
+}
+
+/**
+ * rsi_indicate_pkt_to_os() - This function sends the received packet to
+ *			      mac80211.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_indicate_pkt_to_os(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hw *hw = adapter->hw;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if ((common->iface_down) || (!adapter->sc_nvifs)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* filling in the ieee80211_rx_status flags */
+ rsi_fill_rx_status(hw, skb, common, rx_status);
+
+ ieee80211_rx_irqsafe(hw, skb);
+}
+
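+/**
+ * rsi_set_min_rate() - This function selects the lowest rate common to the
+ *			fixed rate mask and the peer's supported rates,
+ *			falling back to auto (0xffff) when nothing matches.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */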
+static void rsi_set_min_rate(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct rsi_common *common)
+{
+ u8 band = hw->conf.chandef.chan->band;
+ u8 ii;
+ u32 rate_bitmap;
+ bool matched = false;
+
+ common->bitrate_mask[band] = sta->supp_rates[band];
+
+ rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
+
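+	/*
+	 * Bits 0-11 of the bitmap index the legacy entries in rsi_rates[];
+	 * bits 12 and above carry the HT MCS indices.
+	 */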
+ if (rate_bitmap & 0xfff) {
+ /* Find out the min rate */
+ for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+ if (rate_bitmap & BIT(ii)) {
+ common->min_rate = rsi_rates[ii].hw_value;
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
+
+ if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
+ for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
+ if ((rate_bitmap >> 12) & BIT(ii)) {
+ common->min_rate = rsi_mcsrates[ii];
+ matched = true;
+ break;
+ }
+ }
+ }
+
+ if (!matched)
+ common->min_rate = 0xffff;
+}
+
+/**
+ * rsi_mac80211_sta_add() - This function notifies driver about a peer getting
+ * connected.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+
+ rsi_set_min_rate(hw, sta, common);
+
+ if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) {
+ common->vif_info[0].sgi = true;
+ }
+
+ if (sta->ht_cap.ht_supported)
+ ieee80211_start_tx_ba_session(sta, 0, 0);
+
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+/**
+ * rsi_mac80211_sta_remove() - This function notifies driver about a peer
+ * getting disconnected.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ mutex_lock(&common->mutex);
+ /* Resetting all the fields to default values */
+ common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
+ common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+ common->min_rate = 0xffff;
+ common->vif_info[0].is_ht = false;
+ common->vif_info[0].sgi = false;
+ common->vif_info[0].seq_start = 0;
+ common->secinfo.ptk_cipher = 0;
+ common->secinfo.gtk_cipher = 0;
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_ops = {
+ .tx = rsi_mac80211_tx,
+ .start = rsi_mac80211_start,
+ .stop = rsi_mac80211_stop,
+ .add_interface = rsi_mac80211_add_interface,
+ .remove_interface = rsi_mac80211_remove_interface,
+ .config = rsi_mac80211_config,
+ .bss_info_changed = rsi_mac80211_bss_info_changed,
+ .conf_tx = rsi_mac80211_conf_tx,
+ .configure_filter = rsi_mac80211_conf_filter,
+ .set_key = rsi_mac80211_set_key,
+ .set_rts_threshold = rsi_mac80211_set_rts_threshold,
+ .set_bitrate_mask = rsi_mac80211_set_rate_mask,
+ .ampdu_action = rsi_mac80211_ampdu_action,
+ .sta_add = rsi_mac80211_sta_add,
+ .sta_remove = rsi_mac80211_sta_remove,
+};
+
+/**
+ * rsi_mac80211_attach() - This function initializes the mac80211 stack.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int rsi_mac80211_attach(struct rsi_common *common)
+{
+ int status = 0;
+ struct ieee80211_hw *hw = NULL;
+ struct wiphy *wiphy = NULL;
+ struct rsi_hw *adapter = common->priv;
+ u8 addr_mask[ETH_ALEN] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x3};
+
+ rsi_dbg(INIT_ZONE, "%s: Performing mac80211 attach\n", __func__);
+
+ hw = ieee80211_alloc_hw(sizeof(struct rsi_hw), &mac80211_ops);
+ if (!hw) {
+ rsi_dbg(ERR_ZONE, "%s: ieee80211 hw alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, adapter->device);
+
+ hw->priv = adapter;
+ adapter->hw = hw;
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ 0;
+
+ hw->queues = MAX_HW_QUEUES;
+ hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
+
+ hw->max_rates = 1;
+ hw->max_rate_tries = MAX_RETRIES;
+
+ hw->max_tx_aggregation_subframes = 6;
+ rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
+ hw->rate_control_algorithm = "AARF";
+
+ SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
+ ether_addr_copy(hw->wiphy->addr_mask, addr_mask);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->retry_short = RETRY_SHORT;
+ wiphy->retry_long = RETRY_LONG;
+ wiphy->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+ wiphy->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ wiphy->flags = 0;
+
+ wiphy->available_antennas_rx = 1;
+ wiphy->available_antennas_tx = 1;
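+	/* Only the 2.4 GHz band is registered with mac80211 for now. */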
+ wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &adapter->sbands[IEEE80211_BAND_2GHZ];
+
+ status = ieee80211_register_hw(hw);
+ if (status)
+ return status;
+
+ return rsi_init_dbgfs(adapter);
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
new file mode 100644
index 000000000000..8810862ae826
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -0,0 +1,295 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+u32 rsi_zone_enabled = /* INFO_ZONE |
+ INIT_ZONE |
+ MGMT_TX_ZONE |
+ MGMT_RX_ZONE |
+ DATA_TX_ZONE |
+ DATA_RX_ZONE |
+ FSM_ZONE |
+ ISR_ZONE | */
+ ERR_ZONE |
+ 0;
+EXPORT_SYMBOL_GPL(rsi_zone_enabled);
+
+/**
+ * rsi_dbg() - This function outputs informational messages.
+ * @zone: Zone of interest for output message.
+ * @fmt: printf-style format for output message.
+ *
+ * Return: none
+ */
+void rsi_dbg(u32 zone, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (zone & rsi_zone_enabled)
+ pr_info("%pV", &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(rsi_dbg);
+
+/**
+ * rsi_prepare_skb() - This function prepares the skb.
+ * @common: Pointer to the driver private structure.
+ * @buffer: Pointer to the packet data.
+ * @pkt_len: Length of the packet.
+ * @extended_desc: Length of the extended descriptor.
+ *
+ * Return: Pointer to the prepared skb on success, NULL on failure.
+ */
+static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
+ u8 *buffer,
+ u32 pkt_len,
+ u8 extended_desc)
+{
+ struct ieee80211_tx_info *info;
+ struct skb_info *rx_params;
+ struct sk_buff *skb = NULL;
+ u8 payload_offset;
+
+ if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
+ return NULL;
+
+ if (pkt_len > (RSI_RCV_BUFFER_LEN * 4)) {
+ rsi_dbg(ERR_ZONE, "%s: Pkt size > max rx buf size %d\n",
+ __func__, pkt_len);
+ pkt_len = RSI_RCV_BUFFER_LEN * 4;
+ }
+
+ pkt_len -= extended_desc;
+ skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ);
+ if (skb == NULL)
+ return NULL;
+
+ payload_offset = (extended_desc + FRAME_DESC_SZ);
+ skb_put(skb, pkt_len);
+ memcpy((skb->data), (buffer + payload_offset), skb->len);
+
+ info = IEEE80211_SKB_CB(skb);
+ rx_params = (struct skb_info *)info->driver_data;
+ rx_params->rssi = rsi_get_rssi(buffer);
+ rx_params->channel = rsi_get_connected_channel(common->priv);
+
+ return skb;
+}
+
+/**
+ * rsi_read_pkt() - This function reads frames from the card.
+ * @common: Pointer to the driver private structure.
+ * @rcv_pkt_len: Received pkt length. In case of USB it is 0.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len)
+{
+ u8 *frame_desc = NULL, extended_desc = 0;
+ u32 index, length = 0, queueno = 0;
+ u16 actual_length = 0, offset;
+ struct sk_buff *skb = NULL;
+
+ index = 0;
+ do {
+ frame_desc = &common->rx_data_pkt[index];
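+		/*
+		 * The first two 16-bit words of each frame descriptor hold
+		 * the total frame length and the offset at which the payload
+		 * descriptor starts.
+		 */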
+ actual_length = *(u16 *)&frame_desc[0];
+ offset = *(u16 *)&frame_desc[2];
+
+ queueno = rsi_get_queueno(frame_desc, offset);
+ length = rsi_get_length(frame_desc, offset);
+ extended_desc = rsi_get_extended_desc(frame_desc, offset);
+
+ switch (queueno) {
+ case RSI_WIFI_DATA_Q:
+ skb = rsi_prepare_skb(common,
+ (frame_desc + offset),
+ length,
+ extended_desc);
+ if (skb == NULL)
+ goto fail;
+
+ rsi_indicate_pkt_to_os(common, skb);
+ break;
+
+ case RSI_WIFI_MGMT_Q:
+ rsi_mgmt_pkt_recv(common, (frame_desc + offset));
+ break;
+
+ default:
+ rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n",
+ __func__, queueno);
+ goto fail;
+ }
+
+ index += actual_length;
+ rcv_pkt_len -= actual_length;
+ } while (rcv_pkt_len > 0);
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(rsi_read_pkt);
+
+/**
+ * rsi_tx_scheduler_thread() - This function is a kernel thread to send the
+ * packets to the device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_tx_scheduler_thread(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ u32 timeout = EVENT_WAIT_FOREVER;
+
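+	/*
+	 * Wait for a TX event (or a bus specific timeout) and run the QoS
+	 * scheduler until the thread is asked to terminate.
+	 */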
+ do {
+ if (adapter->determine_event_timeout)
+ timeout = adapter->determine_event_timeout(adapter);
+ rsi_wait_event(&common->tx_thread.event, timeout);
+ rsi_reset_event(&common->tx_thread.event);
+
+ if (common->init_done)
+ rsi_core_qos_processor(common);
+ } while (atomic_read(&common->tx_thread.thread_done) == 0);
+ complete_and_exit(&common->tx_thread.completion, 0);
+}
+
+/**
+ * rsi_91x_init() - This function initializes the OS interface operations.
+ * @void: Void.
+ *
+ * Return: Pointer to the adapter structure on success, NULL on failure.
+ */
+struct rsi_hw *rsi_91x_init(void)
+{
+ struct rsi_hw *adapter = NULL;
+ struct rsi_common *common = NULL;
+ u8 ii = 0;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ adapter->priv = kzalloc(sizeof(*common), GFP_KERNEL);
+ if (adapter->priv == NULL) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of memory\n",
+ __func__);
+ kfree(adapter);
+ return NULL;
+ } else {
+ common = adapter->priv;
+ common->priv = adapter;
+ }
+
+ for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+ skb_queue_head_init(&common->tx_queue[ii]);
+
+ rsi_init_event(&common->tx_thread.event);
+ mutex_init(&common->mutex);
+ mutex_init(&common->tx_rxlock);
+
+ if (rsi_create_kthread(common,
+ &common->tx_thread,
+ rsi_tx_scheduler_thread,
+ "Tx-Thread")) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+ goto err;
+ }
+
+ common->init_done = true;
+ return adapter;
+
+err:
+ kfree(common);
+ kfree(adapter);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rsi_91x_init);
+
+/**
+ * rsi_91x_deinit() - This function de-initializes the OS interface operations.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_91x_deinit(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ u8 ii;
+
+ rsi_dbg(INFO_ZONE, "%s: Performing deinit os ops\n", __func__);
+
+ rsi_kill_thread(&common->tx_thread);
+
+ for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+ skb_queue_purge(&common->tx_queue[ii]);
+
+ common->init_done = false;
+
+ kfree(common);
+ kfree(adapter->rsi_dev);
+ kfree(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_91x_deinit);
+
+/**
+ * rsi_91x_hal_module_init() - This function is invoked when the module is
+ *				loaded into the kernel.
+ * @void: Void.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_91x_hal_module_init(void)
+{
+ rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_91x_hal_module_exit() - This function is called at the time of
+ *			       removing/unloading the module.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_91x_hal_module_exit(void)
+{
+ rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__);
+}
+
+module_init(rsi_91x_hal_module_init);
+module_exit(rsi_91x_hal_module_exit);
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Station driver for RSI 91x devices");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
new file mode 100644
index 000000000000..2361a6849ad7
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -0,0 +1,1304 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+static struct bootup_params boot_params_20 = {
+ .magic_number = cpu_to_le16(0x5aa5),
+ .crystal_good_time = 0x0,
+ .valid = cpu_to_le32(VALID_20),
+ .reserved_for_valids = 0x0,
+ .bootup_mode_info = 0x0,
+ .digital_loop_back_params = 0x0,
+ .rtls_timestamp_en = 0x0,
+ .host_spi_intr_cfg = 0x0,
+ .device_clk_info = {{
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = cpu_to_le16(BIT(3)),
+ .bbp_lmac_clk_reg_val = cpu_to_le16(0x121),
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+ (TA_PLL_M_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+ (PLL960_N_VAL_20)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ } },
+ .buckboost_wakeup_cnt = 0x0,
+ .pmu_wakeup_wait = 0x0,
+ .shutdown_wait_time = 0x0,
+ .pmu_slp_clkout_sel = 0x0,
+ .wdt_prog_value = 0x0,
+ .wdt_soc_rst_delay = 0x0,
+ .dcdc_operation_mode = 0x0,
+ .soc_reset_wait_cnt = 0x0
+};
+
+static struct bootup_params boot_params_40 = {
+ .magic_number = cpu_to_le16(0x5aa5),
+ .crystal_good_time = 0x0,
+ .valid = cpu_to_le32(VALID_40),
+ .reserved_for_valids = 0x0,
+ .bootup_mode_info = 0x0,
+ .digital_loop_back_params = 0x0,
+ .rtls_timestamp_en = 0x0,
+ .host_spi_intr_cfg = 0x0,
+ .device_clk_info = {{
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = cpu_to_le16(0x09),
+ .bbp_lmac_clk_reg_val = cpu_to_le16(0x1121),
+ .umac_clock_reg_config = cpu_to_le16(0x48),
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ },
+ {
+ .pll_config_g = {
+ .tapll_info_g = {
+ .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+ (TA_PLL_M_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+ },
+ .pll960_info_g = {
+ .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+ (PLL960_N_VAL_40)),
+ .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+ .pll_reg_3 = 0x0,
+ },
+ .afepll_info_g = {
+ .pll_reg = cpu_to_le16(0x9f0),
+ }
+ },
+ .switch_clk_g = {
+ .switch_clk_info = 0x0,
+ .bbp_lmac_clk_reg_val = 0x0,
+ .umac_clock_reg_config = 0x0,
+ .qspi_uart_clock_reg_config = 0x0
+ }
+ } },
+ .buckboost_wakeup_cnt = 0x0,
+ .pmu_wakeup_wait = 0x0,
+ .shutdown_wait_time = 0x0,
+ .pmu_slp_clkout_sel = 0x0,
+ .wdt_prog_value = 0x0,
+ .wdt_soc_rst_delay = 0x0,
+ .dcdc_operation_mode = 0x0,
+ .soc_reset_wait_cnt = 0x0
+};
+
+static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
+
+/**
+ * rsi_set_default_parameters() - This function sets default parameters.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: none
+ */
+static void rsi_set_default_parameters(struct rsi_common *common)
+{
+ common->band = IEEE80211_BAND_2GHZ;
+ common->channel_width = BW_20MHZ;
+ common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ common->channel = 1;
+ common->min_rate = 0xffff;
+ common->fsm_state = FSM_CARD_NOT_READY;
+ common->iface_down = true;
+}
+
+/**
+ * rsi_set_contention_vals() - This function sets the contention values for the
+ * backoff procedure.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_set_contention_vals(struct rsi_common *common)
+{
+ u8 ii = 0;
+
+ for (; ii < NUM_EDCA_QUEUES; ii++) {
+ common->tx_qinfo[ii].wme_params =
+ (((common->edca_params[ii].cw_min / 2) +
+ (common->edca_params[ii].aifs)) *
+ WMM_SHORT_SLOT_TIME + SIFS_DURATION);
+ common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params;
+ common->tx_qinfo[ii].pkt_contended = 0;
+ }
+}
+
+/**
+ * rsi_send_internal_mgmt_frame() - This function queues internal management
+ *				     frames for transmission to the firmware
+ *				     and wakes the TX thread.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_send_internal_mgmt_frame(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct skb_info *tx_params;
+
+ if (skb == NULL) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__);
+ return -ENOMEM;
+ }
+ tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
+ tx_params->flags |= INTERNAL_MGMT_PKT;
+ skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb);
+ rsi_set_event(&common->tx_thread.event);
+ return 0;
+}
+
+/**
+ * rsi_load_radio_caps() - This function is used to send radio capabilities
+ * values to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_load_radio_caps(struct rsi_common *common)
+{
+ struct rsi_radio_caps *radio_caps;
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hw *hw = adapter->hw;
+ u16 inx = 0;
+ u8 ii;
+ u8 radio_id = 0;
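+	/*
+	 * Default per-rate power control codes; copied into rate_pwr and
+	 * sent to the firmware as gcpd_per_rate below.
+	 */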
+ u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0,
+ 0xf0, 0xf0, 0xf0, 0xf0};
+ struct ieee80211_conf *conf = &hw->conf;
+ struct sk_buff *skb;
+
+	rsi_dbg(INFO_ZONE, "%s: Sending radio capabilities frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_radio_caps));
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_radio_caps));
+ radio_caps = (struct rsi_radio_caps *)skb->data;
+
+ radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES);
+ radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8);
+
+ if (common->channel_width == BW_40MHZ) {
+ radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ);
+ radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ);
+ if (common->channel_width) {
+ radio_caps->desc_word[5] =
+ cpu_to_le16(common->channel_width << 12);
+ radio_caps->desc_word[5] |= cpu_to_le16(FULL40M_ENABLE);
+ }
+
+ if (conf_is_ht40_minus(conf)) {
+ radio_caps->desc_word[5] = 0;
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(LOWER_20_ENABLE);
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(LOWER_20_ENABLE >> 12);
+ }
+
+ if (conf_is_ht40_plus(conf)) {
+ radio_caps->desc_word[5] = 0;
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(UPPER_20_ENABLE);
+ radio_caps->desc_word[5] |=
+ cpu_to_le16(UPPER_20_ENABLE >> 12);
+ }
+ }
+
+ radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8);
+
+ for (ii = 0; ii < MAX_HW_QUEUES; ii++) {
+ radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3);
+ radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f);
+ radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2);
+ radio_caps->qos_params[ii].txop_q = 0;
+ }
+
+ for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) {
+ radio_caps->qos_params[ii].cont_win_min_q =
+ cpu_to_le16(common->edca_params[ii].cw_min);
+ radio_caps->qos_params[ii].cont_win_max_q =
+ cpu_to_le16(common->edca_params[ii].cw_max);
+ radio_caps->qos_params[ii].aifsn_val_q =
+ cpu_to_le16((common->edca_params[ii].aifs) << 8);
+ radio_caps->qos_params[ii].txop_q =
+ cpu_to_le16(common->edca_params[ii].txop);
+ }
+
+ memcpy(&common->rate_pwr[0], &gc[0], 40);
+ for (ii = 0; ii < 20; ii++)
+ radio_caps->gcpd_per_rate[inx++] =
+ cpu_to_le16(common->rate_pwr[ii] & 0x00FF);
+
+ radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+
+
+ skb_put(skb, (sizeof(struct rsi_radio_caps)));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ * @msg_len: Length of the received packet.
+ * @type: Type of the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
+ u8 *msg,
+ s32 msg_len,
+ u8 type)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_tx_info *info;
+ struct skb_info *rx_params;
+ u8 pad_bytes = msg[4];
+ u8 pkt_recv;
+ struct sk_buff *skb;
+ char *buffer;
+
+ if (type == RX_DOT11_MGMT) {
+ if (!adapter->sc_nvifs)
+ return -ENOLINK;
+
+ msg_len -= pad_bytes;
+ if ((msg_len <= 0) || (!msg)) {
+ rsi_dbg(MGMT_RX_ZONE,
+ "%s: Invalid rx msg of len = %d\n",
+ __func__, msg_len);
+ return -EINVAL;
+ }
+
+ skb = dev_alloc_skb(msg_len);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ buffer = skb_put(skb, msg_len);
+
+ memcpy(buffer,
+ (u8 *)(msg + FRAME_DESC_SZ + pad_bytes),
+ msg_len);
+
+ pkt_recv = buffer[0];
+
+ info = IEEE80211_SKB_CB(skb);
+ rx_params = (struct skb_info *)info->driver_data;
+ rx_params->rssi = rsi_get_rssi(msg);
+ rx_params->channel = rsi_get_channel(msg);
+ rsi_indicate_pkt_to_os(common, skb);
+ } else {
+ rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__);
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_hal_send_sta_notify_frame() - This function sends the station notify
+ * frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @opmode: Operating mode of device.
+ * @notify_event: Notification about station connection.
+ * @bssid: BSSID of the peer.
+ * @qos_enable: Whether QoS is enabled for the peer.
+ * @aid: Association ID of the peer.
+ *
+ * Return: status: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
+ u8 opmode,
+ u8 notify_event,
+ const unsigned char *bssid,
+ u8 qos_enable,
+ u16 aid)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_peer_notify *peer_notify;
+ u16 vap_id = 0;
+ int status;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_peer_notify));
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_peer_notify));
+ peer_notify = (struct rsi_peer_notify *)skb->data;
+
+ peer_notify->command = cpu_to_le16(opmode << 1);
+
+ switch (notify_event) {
+ case STA_CONNECTED:
+ peer_notify->command |= cpu_to_le16(RSI_ADD_PEER);
+ break;
+ case STA_DISCONNECTED:
+ peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER);
+ break;
+ default:
+ break;
+ }
+
+ peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4);
+ ether_addr_copy(peer_notify->mac_addr, bssid);
+
+ peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0);
+
+ peer_notify->desc_word[0] =
+ cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY);
+ peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8);
+
+ skb_put(skb, sizeof(struct rsi_peer_notify));
+
+ status = rsi_send_internal_mgmt_frame(common, skb);
+
+ if (!status && qos_enable) {
+ rsi_set_contention_vals(common);
+ status = rsi_load_radio_caps(common);
+ }
+ return status;
+}
+
+/**
+ * rsi_send_aggregation_params_frame() - This function sends the ampdu
+ * indication frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @tid: Traffic identifier.
+ * @ssn: Starting sequence number.
+ * @buf_size: Block-ack buffer size.
+ * @event: ADDBA/DELBA event type.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_send_aggregation_params_frame(struct rsi_common *common,
+ u16 tid,
+ u16 ssn,
+ u8 buf_size,
+ u8 event)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_mac_frame *mgmt_frame;
+ u8 peer_id = 0;
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__);
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND);
+
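+	/* Descriptor word 7 packs the TID in the low nibble, the start/stop
+	 * aggregation opcode at bit 4, the RX block-ack indication at bit 5
+	 * and the peer id in bits 8-15.
+	 */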
+ if (event == STA_TX_ADDBA_DONE) {
+ mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+ mgmt_frame->desc_word[5] = cpu_to_le16(buf_size);
+ mgmt_frame->desc_word[7] =
+ cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8)));
+ } else if (event == STA_RX_ADDBA_DONE) {
+ mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (START_AMPDU_AGGR << 4) |
+ (RX_BA_INDICATION << 5) |
+ (peer_id << 8));
+ } else if (event == STA_TX_DELBA) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (STOP_AMPDU_AGGR << 4) |
+ (peer_id << 8));
+ } else if (event == STA_RX_DELBA) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+ (STOP_AMPDU_AGGR << 4) |
+ (RX_BA_INDICATION << 5) |
+ (peer_id << 8));
+ }
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_program_bb_rf() - This function starts base band and RF programming.
+ * This is called after initial configurations are done.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_program_bb_rf(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA);
+ mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint << 8);
+
+ if (common->rf_reset) {
+ mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE);
+ rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n",
+ __func__);
+ common->rf_reset = 0;
+ }
+ common->bb_rf_prog_count = 1;
+ mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET |
+ BBP_REG_WRITE | (RSI_RF_TYPE << 4));
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_vap_capabilities() - This function sends the VAP capabilities to firmware.
+ * @common: Pointer to the driver private structure.
+ * @mode: Operating mode of the device.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_vap_caps *vap_caps;
+ u16 vap_id = 0;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_vap_caps));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_vap_caps));
+ vap_caps = (struct rsi_vap_caps *)skb->data;
+
+ vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES);
+ vap_caps->desc_word[4] = cpu_to_le16(mode |
+ (common->channel_width << 8));
+ vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) |
+ (common->mac_id << 4) |
+ common->radio_id);
+
+ memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN);
+ vap_caps->keep_alive_period = cpu_to_le16(90);
+ vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD);
+
+ vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
+ vap_caps->default_mgmt_rate = 0;
+ if (conf_is_ht40(&common->priv->hw->conf)) {
+ vap_caps->default_ctrl_rate =
+ cpu_to_le32(RSI_RATE_6 | FULL40M_ENABLE << 16);
+ } else {
+ vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
+ }
+ vap_caps->default_data_rate = 0;
+ vap_caps->beacon_interval = cpu_to_le16(200);
+ vap_caps->dtim_period = cpu_to_le16(4);
+
+ skb_put(skb, sizeof(*vap_caps));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_hal_load_key() - This function is used to load keys within the firmware.
+ * @common: Pointer to the driver private structure.
+ * @data: Pointer to the key data.
+ * @key_len: Key length to be loaded.
+ * @key_type: Type of key: GROUP/PAIRWISE.
+ * @key_id: Key index.
+ * @cipher: Type of cipher used.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_hal_load_key(struct rsi_common *common,
+ u8 *data,
+ u16 key_len,
+ u8 key_type,
+ u8 key_id,
+ u32 cipher)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_set_key *set_key;
+ u16 key_descriptor = 0;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_set_key));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_set_key));
+ set_key = (struct rsi_set_key *)skb->data;
+
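+	/* Build the key descriptor: BIT(2) marks WEP, BIT(3) a WEP-104 key
+	 * length, BIT(4) a non-WEP cipher, BIT(5) TKIP; BIT(13) is always
+	 * set, the key type goes in the low bits and the key index in
+	 * bits 14-15.
+	 */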
+ if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ key_len += 1;
+ key_descriptor |= BIT(2);
+ if (key_len >= 13)
+ key_descriptor |= BIT(3);
+ } else if (cipher != KEY_TYPE_CLEAR) {
+ key_descriptor |= BIT(4);
+ if (key_type == RSI_PAIRWISE_KEY)
+ key_id = 0;
+ if (cipher == WLAN_CIPHER_SUITE_TKIP)
+ key_descriptor |= BIT(5);
+ }
+ key_descriptor |= (key_type | BIT(13) | (key_id << 14));
+
+ set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ);
+ set_key->desc_word[4] = cpu_to_le16(key_descriptor);
+
+ if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ memcpy(&set_key->key[key_id][1],
+ data,
+ key_len * 2);
+ } else {
+ memcpy(&set_key->key[0][0], data, key_len);
+ }
+
+ memcpy(set_key->tx_mic_key, &data[16], 8);
+ memcpy(set_key->rx_mic_key, &data[24], 8);
+
+ skb_put(skb, sizeof(struct rsi_set_key));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_load_bootup_params() - This function sends the bootup params to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_load_bootup_params(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_boot_params *boot_params;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
+ skb = dev_alloc_skb(sizeof(struct rsi_boot_params));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, sizeof(struct rsi_boot_params));
+ boot_params = (struct rsi_boot_params *)skb->data;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__);
+
+ if (common->channel_width == BW_40MHZ) {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_40,
+ sizeof(struct bootup_params));
+ rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
+ UMAC_CLK_40BW);
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW);
+ } else {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_20,
+ sizeof(struct bootup_params));
+ if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_20BW);
+ } else {
+ boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_40MHZ);
+ }
+ }
+
+	/* Bits 0-11 indicate the length of the packet.
+	 * Bits 12-15 indicate the host queue number.
+	 */
+ boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) |
+ (RSI_WIFI_MGMT_Q << 12));
+ boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST);
+
+ skb_put(skb, sizeof(struct rsi_boot_params));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_send_reset_mac() - This function prepares reset MAC request and sends an
+ * internal management frame to indicate it to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_reset_mac(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
+ mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_channel() - This function programs the channel.
+ * @common: Pointer to the driver private structure.
+ * @channel: Channel value to be set.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+int rsi_set_channel(struct rsi_common *common, u16 channel)
+{
+ struct sk_buff *skb = NULL;
+ struct rsi_mac_frame *mgmt_frame;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending scan req frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ if (common->band == IEEE80211_BAND_5GHZ) {
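+		/* Map the 5 GHz channel number onto the firmware's internal
+		 * channel index before programming it.
+		 */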
+ if ((channel >= 36) && (channel <= 64))
+ channel = ((channel - 32) / 4);
+ else if ((channel > 64) && (channel <= 140))
+ channel = ((channel - 102) / 4) + 8;
+ else if (channel >= 149)
+ channel = ((channel - 151) / 4) + 18;
+ else
+ return -EINVAL;
+ } else {
+ if (channel > 14) {
+ rsi_dbg(ERR_ZONE, "%s: Invalid chno %d, band = %d\n",
+ __func__, channel, common->band);
+ return -EINVAL;
+ }
+ }
+
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
+ mgmt_frame->desc_word[4] = cpu_to_le16(channel);
+
+ mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
+ BBP_REG_WRITE |
+ (RSI_RF_TYPE << 4));
+
+ mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
+
+ if (common->channel_width == BW_40MHZ)
+ mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
+
+ common->channel = channel;
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_compare() - This function is used to compare two integers
+ * @a: pointer to the first integer
+ * @b: pointer to the second integer
+ *
+ * Return: 0 if equal, -1 if the first is greater, 1 if the first is smaller
+ *	   (sorts in descending order).
+ */
+static int rsi_compare(const void *a, const void *b)
+{
+ u16 _a = *(const u16 *)(a);
+ u16 _b = *(const u16 *)(b);
+
+ if (_a > _b)
+ return -1;
+
+ if (_a < _b)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * rsi_map_rates() - This function is used to map selected rates to hw rates.
+ * @rate: The standard rate to be mapped.
+ * @offset: Offset that will be returned.
+ *
+ * Return: false if it is an MCS rate, else true.
+ */
+static bool rsi_map_rates(u16 rate, int *offset)
+{
+ int kk;
+ for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) {
+ if (rate == mcs[kk]) {
+ *offset = kk;
+ return false;
+ }
+ }
+
+ for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) {
+ if (rate == rsi_rates[kk].bitrate / 5) {
+ *offset = kk;
+ break;
+ }
+ }
+ return true;
+}
+
+/**
+ * rsi_send_auto_rate_request() - This function is to set rates for connection
+ * and send autorate request to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_auto_rate_request(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_auto_rate *auto_rate;
+ int ii = 0, jj = 0, kk = 0;
+ struct ieee80211_hw *hw = common->priv->hw;
+ u8 band = hw->conf.chandef.chan->band;
+ u8 num_supported_rates = 0;
+ u8 rate_offset = 0;
+ u32 rate_bitmap = common->bitrate_mask[band];
+
+ u16 *selected_rates, min_rate;
+
+ skb = dev_alloc_skb(sizeof(struct rsi_auto_rate));
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+	selected_rates = kmalloc(2 * RSI_TBL_SZ, GFP_KERNEL);
+	if (!selected_rates) {
+		rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
+			__func__);
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+ memset(skb->data, 0, sizeof(struct rsi_auto_rate));
+ memset(selected_rates, 0, 2 * RSI_TBL_SZ);
+
+ auto_rate = (struct rsi_auto_rate *)skb->data;
+
+ auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f));
+ auto_rate->collision_tolerance = cpu_to_le16(3);
+ auto_rate->failure_limit = cpu_to_le16(3);
+ auto_rate->initial_boundary = cpu_to_le16(3);
+ auto_rate->max_threshold_limt = cpu_to_le16(27);
+
+ auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND);
+
+ if (common->channel_width == BW_40MHZ)
+ auto_rate->desc_word[7] |= cpu_to_le16(1);
+
+ if (band == IEEE80211_BAND_2GHZ)
+ min_rate = STD_RATE_01;
+ else
+ min_rate = STD_RATE_06;
+
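+	/* Build the rate table: collect the legacy rates enabled in the
+	 * bitrate mask, append the MCS rates for HT connections, pad with
+	 * the band's minimum rate and sort in descending order.
+	 */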
+ for (ii = 0, jj = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+ if (rate_bitmap & BIT(ii)) {
+ selected_rates[jj++] = (rsi_rates[ii].bitrate / 5);
+ rate_offset++;
+ }
+ }
+ num_supported_rates = jj;
+
+ if (common->vif_info[0].is_ht) {
+ for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
+ selected_rates[jj++] = mcs[ii];
+ num_supported_rates += ARRAY_SIZE(mcs);
+ rate_offset += ARRAY_SIZE(mcs);
+ }
+
+ if (rate_offset < (RSI_TBL_SZ / 2) - 1) {
+ for (ii = jj; ii < (RSI_TBL_SZ / 2); ii++) {
+ selected_rates[jj++] = min_rate;
+ rate_offset++;
+ }
+ }
+
+ sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
+
+ /* mapping the rates to RSI rates */
+ for (ii = 0; ii < jj; ii++) {
+ if (rsi_map_rates(selected_rates[ii], &kk)) {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_rates[kk].hw_value);
+ } else {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[kk]);
+ }
+ }
+
+ /* loading HT rates in the bottom half of the auto rate table */
+ if (common->vif_info[0].is_ht) {
+ if (common->vif_info[0].sgi)
+ auto_rate->supported_rates[rate_offset++] =
+ cpu_to_le16(RSI_RATE_MCS7_SG);
+
+ for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1;
+ ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) {
+ if (common->vif_info[0].sgi)
+ auto_rate->supported_rates[ii++] =
+ cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[kk--]);
+ }
+
+ for (; ii < RSI_TBL_SZ; ii++) {
+ auto_rate->supported_rates[ii] =
+ cpu_to_le16(rsi_mcsrates[0]);
+ }
+ }
+
+ auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2);
+ auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2);
+ auto_rate->desc_word[7] |= cpu_to_le16(0 << 8);
+ num_supported_rates *= 2;
+
+ auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) -
+ FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+
+	skb_put(skb, sizeof(struct rsi_auto_rate));
+ kfree(selected_rates);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_inform_bss_status() - This function informs the firmware about the BSS
+ *			     status by sending a station-notify internal
+ *			     management frame.
+ * @common: Pointer to the driver private structure.
+ * @status: BSS status (connected/disconnected).
+ * @bssid: BSSID of the BSS.
+ * @qos_enable: Whether QoS is enabled.
+ * @aid: Association ID (unique per STA).
+ *
+ * Return: None.
+ */
+void rsi_inform_bss_status(struct rsi_common *common,
+ u8 status,
+ const unsigned char *bssid,
+ u8 qos_enable,
+ u16 aid)
+{
+ if (status) {
+ rsi_hal_send_sta_notify_frame(common,
+ NL80211_IFTYPE_STATION,
+ STA_CONNECTED,
+ bssid,
+ qos_enable,
+ aid);
+ if (common->min_rate == 0xffff)
+ rsi_send_auto_rate_request(common);
+ } else {
+ rsi_hal_send_sta_notify_frame(common,
+ NL80211_IFTYPE_STATION,
+ STA_DISCONNECTED,
+ bssid,
+ qos_enable,
+ aid);
+ }
+}
+
+/**
+ * rsi_eeprom_read() - This function sends a frame to read the mac address
+ * from the eeprom.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_eeprom_read(struct rsi_common *common)
+{
+ struct rsi_mac_frame *mgmt_frame;
+ struct sk_buff *skb;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__);
+
+ skb = dev_alloc_skb(FRAME_DESC_SZ);
+ if (!skb) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+ /* FrameType */
+ mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE);
+ mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+ /* Number of bytes to read */
+ mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN +
+ WLAN_MAC_MAGIC_WORD_LEN +
+ WLAN_HOST_MODE_LEN +
+ WLAN_FW_VERSION_LEN);
+ /* Address to read */
+ mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR);
+
+ skb_put(skb, FRAME_DESC_SZ);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_handle_ta_confirm_type() - This function handles the confirm frames.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_handle_ta_confirm_type(struct rsi_common *common,
+ u8 *msg)
+{
+ u8 sub_type = (msg[15] & 0xff);
+
+ switch (sub_type) {
+ case BOOTUP_PARAMS_REQUEST:
+ rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
+ __func__);
+ if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
+ if (rsi_eeprom_read(common)) {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ goto out;
+ } else {
+ common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
+ }
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received bootup params cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case EEPROM_READ_TYPE:
+ if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) {
+ if (msg[16] == MAGIC_WORD) {
+ u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN
+ + WLAN_MAC_MAGIC_WORD_LEN);
+ memcpy(common->mac_addr,
+ &msg[offset],
+ ETH_ALEN);
+ memcpy(&common->fw_ver,
+ &msg[offset + ETH_ALEN],
+ sizeof(struct version_info));
+
+ } else {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ break;
+ }
+ if (rsi_send_reset_mac(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RESET_MAC_SENT;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received eeprom mac addr in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case RESET_MAC_REQ:
+ if (common->fsm_state == FSM_RESET_MAC_SENT) {
+ rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n",
+ __func__);
+
+ if (rsi_load_radio_caps(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RADIO_CAPS_SENT;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received reset mac cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case RADIO_CAPABILITIES:
+ if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
+ common->rf_reset = 1;
+ if (rsi_program_bb_rf(common)) {
+ goto out;
+ } else {
+ common->fsm_state = FSM_BB_RF_PROG_SENT;
+ rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n",
+ __func__);
+ }
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Received radio caps cfm in %d state\n",
+ __func__, common->fsm_state);
+ return 0;
+ }
+ break;
+
+ case BB_PROG_VALUES_REQUEST:
+ case RF_PROG_VALUES_REQUEST:
+ case BBP_PROG_IN_TA:
+ rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__);
+ if (common->fsm_state == FSM_BB_RF_PROG_SENT) {
+ common->bb_rf_prog_count--;
+ if (!common->bb_rf_prog_count) {
+ common->fsm_state = FSM_MAC_INIT_DONE;
+ return rsi_mac80211_attach(common);
+ }
+ } else {
+ goto out;
+ }
+ break;
+
+ default:
+ rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
+ __func__);
+ break;
+ }
+ return 0;
+out:
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n",
+ __func__);
+ return -EINVAL;
+}
+
+/**
+ * rsi_mgmt_pkt_recv() - This function processes the management packets
+ *			received from the hardware.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
+{
+ s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
+ u16 msg_type = (msg[2]);
+
+ rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
+ __func__, msg_len, msg_type);
+
+ if (msg_type == TA_CONFIRM_TYPE) {
+ return rsi_handle_ta_confirm_type(common, msg);
+ } else if (msg_type == CARD_READY_IND) {
+ rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
+ __func__);
+ if (common->fsm_state == FSM_CARD_NOT_READY) {
+ rsi_set_default_parameters(common);
+
+ if (rsi_load_bootup_params(common))
+ return -ENOMEM;
+ else
+ common->fsm_state = FSM_BOOT_PARAMS_SENT;
+ } else {
+ return -EINVAL;
+ }
+ } else if (msg_type == TX_STATUS_IND) {
+ if (msg[15] == PROBEREQ_CONFIRM) {
+ common->mgmt_q_block = false;
+ rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
+ __func__);
+ }
+ } else {
+ return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type);
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
new file mode 100644
index 000000000000..8e48e72bae20
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -0,0 +1,196 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+
+/**
+ * rsi_send_data_pkt() - This function sends the received data packet from
+ *			 the driver to the device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *tmp_hdr = NULL;
+ struct ieee80211_tx_info *info;
+ struct skb_info *tx_params;
+ struct ieee80211_bss_conf *bss = NULL;
+ int status = -EINVAL;
+ u8 ieee80211_size = MIN_802_11_HDR_LEN;
+ u8 extnd_size = 0;
+ __le16 *frame_desc;
+ u16 seq_num = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ bss = &info->control.vif->bss_conf;
+ tx_params = (struct skb_info *)info->driver_data;
+
+ if (!bss->assoc)
+ goto err;
+
+ tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+ seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
+
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ status = -ENOSPC;
+ goto err;
+ }
+
+ skb_push(skb, (FRAME_DESC_SZ + extnd_size));
+ frame_desc = (__le16 *)&skb->data[0];
+ memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
+
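+	/* Build the TX data descriptor: QoS and encryption flags go into
+	 * word 6, length and queue number into word 0, header sizes into
+	 * word 2, an optional fixed rate into words 3-4, and the sequence
+	 * number, TID, priority and station id into words 6-7.
+	 */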
+ if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+ ieee80211_size += 2;
+ frame_desc[6] |= cpu_to_le16(BIT(12));
+ }
+
+ if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
+ (common->secinfo.security_enable)) {
+ if (rsi_is_cipher_wep(common))
+ ieee80211_size += 4;
+ else
+ ieee80211_size += 8;
+ frame_desc[6] |= cpu_to_le16(BIT(15));
+ }
+
+ frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_DATA_Q << 12));
+ frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
+
+ if (common->min_rate != 0xffff) {
+ /* Send fixed rate */
+ frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ frame_desc[4] = cpu_to_le16(common->min_rate);
+ }
+
+ frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
+ frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
+ (skb->priority & 0xf) |
+ (tx_params->sta_id << 8));
+
+ status = adapter->host_intf_write_pkt(common->priv,
+ skb->data,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
+ __func__);
+
+err:
+ ++common->tx_stats.total_tx_pkt_freed[skb->priority];
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
+
+/**
+ * rsi_send_mgmt_pkt() - This function sends the received management packet
+ * from driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_mgmt_pkt(struct rsi_common *common,
+ struct sk_buff *skb)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_bss_conf *bss = NULL;
+ struct skb_info *tx_params;
+ int status = -E2BIG;
+ __le16 *msg = NULL;
+ u8 extnd_size = 0;
+ u8 vap_id = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+ tx_params = (struct skb_info *)info->driver_data;
+ extnd_size = ((uintptr_t)skb->data & 0x3);
+
+ if (tx_params->flags & INTERNAL_MGMT_PKT) {
+ if ((extnd_size) > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+ dev_kfree_skb(skb);
+ return -ENOSPC;
+ }
+ skb_push(skb, extnd_size);
+ skb->data[extnd_size + 4] = extnd_size;
+ status = adapter->host_intf_write_pkt(common->priv,
+ (u8 *)skb->data,
+ skb->len);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write the packet\n", __func__);
+ }
+ dev_kfree_skb(skb);
+ return status;
+ }
+
+ bss = &info->control.vif->bss_conf;
+ wh = (struct ieee80211_hdr *)&skb->data[0];
+
+ if (FRAME_DESC_SZ > skb_headroom(skb))
+ goto err;
+
+ skb_push(skb, FRAME_DESC_SZ);
+ memset(skb->data, 0, FRAME_DESC_SZ);
+ msg = (__le16 *)skb->data;
+
+ if (skb->len > MAX_MGMT_PKT_SIZE) {
+ rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
+ goto err;
+ }
+
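+	/* Management frame descriptor: length and queue number in word 0,
+	 * frame type in word 1, header length in word 2, rate info in
+	 * words 3-4, sequence number in word 6 and VAP id in word 7.
+	 */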
+ msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+ (RSI_WIFI_MGMT_Q << 12));
+ msg[1] = cpu_to_le16(TX_DOT11_MGMT);
+ msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
+ msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
+ msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
+
+ if (wh->addr1[0] & BIT(0))
+ msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
+
+ if (common->band == IEEE80211_BAND_2GHZ)
+ msg[4] = cpu_to_le16(RSI_11B_MODE);
+ else
+ msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
+
+ /* Indicate to firmware to give cfm */
+ if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
+ msg[1] |= cpu_to_le16(BIT(10));
+ msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
+ common->mgmt_q_block = true;
+ }
+
+ msg[7] |= cpu_to_le16(vap_id << 8);
+
+ status = adapter->host_intf_write_pkt(common->priv,
+ (u8 *)msg,
+ skb->len);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
+
+err:
+ rsi_indicate_tx_status(common->priv, skb, status);
+ return status;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
new file mode 100644
index 000000000000..852453f386e2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -0,0 +1,850 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
+ * @rw: Read/write flag.
+ * @func: Function number.
+ * @raw: Indicates whether to perform a read after write.
+ * @address: Address to read from or write to.
+ * @writedata: Data to write.
+ *
+ * Return: argument
+ */
+static u32 rsi_sdio_set_cmd52_arg(bool rw,
+ u8 func,
+ u8 raw,
+ u32 address,
+ u8 writedata)
+{
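+	/* Pack the CMD52 argument: R/W flag in bit 31, function number in
+	 * bits 28-30, RAW flag in bit 27, register address in bits 9-25 and
+	 * the write data in the low byte.
+	 */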
+ return ((rw & 1) << 31) | ((func & 0x7) << 28) |
+ ((raw & 1) << 27) | (1 << 26) |
+ ((address & 0x1FFFF) << 9) | (1 << 8) |
+ (writedata & 0xFF);
+}
+
+/**
+ * rsi_cmd52writebyte() - This function issues cmd52 byte write onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to write.
+ * @byte: Data to write.
+ *
+ * Return: Write status.
+ */
+static int rsi_cmd52writebyte(struct mmc_card *card,
+ u32 address,
+ u8 byte)
+{
+ struct mmc_command io_cmd;
+ u32 arg;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ arg = rsi_sdio_set_cmd52_arg(1, 0, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.arg = arg;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+/**
+ * rsi_cmd52readbyte() - This function issues cmd52 byte read onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to read from.
+ * @byte: Variable to store read value.
+ *
+ * Return: Read status.
+ */
+static int rsi_cmd52readbyte(struct mmc_card *card,
+ u32 address,
+ u8 *byte)
+{
+ struct mmc_command io_cmd;
+ u32 arg;
+ int err;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ arg = rsi_sdio_set_cmd52_arg(0, 0, 0, address, 0);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.arg = arg;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+ if ((!err) && (byte))
+ *byte = io_cmd.resp[0] & 0xFF;
+ return err;
+}
+
+/**
+ * rsi_issue_sdiocommand() - This function issues sdio commands.
+ * @func: Pointer to the sdio_func structure.
+ * @opcode: Opcode value.
+ * @arg: Arguments to pass.
+ * @flags: Flags which are set.
+ * @resp: Pointer to store response.
+ *
+ * Return: err: command status as 0 or -1.
+ */
+static int rsi_issue_sdiocommand(struct sdio_func *func,
+ u32 opcode,
+ u32 arg,
+ u32 flags,
+ u32 *resp)
+{
+ struct mmc_command cmd;
+ struct mmc_host *host;
+ int err;
+
+ host = func->card->host;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ cmd.opcode = opcode;
+ cmd.arg = arg;
+ cmd.flags = flags;
+ err = mmc_wait_for_cmd(host, &cmd, 3);
+
+ if ((!err) && (resp))
+ *resp = cmd.resp[0];
+
+ return err;
+}
+
+/**
+ * rsi_handle_interrupt() - This function is called upon the occurrence
+ *			    of an interrupt.
+ * @function: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_handle_interrupt(struct sdio_func *function)
+{
+ struct rsi_hw *adapter = sdio_get_drvdata(function);
+
+ sdio_release_host(function);
+ rsi_interrupt_handler(adapter);
+ sdio_claim_host(function);
+}
+
+/**
+ * rsi_reset_card() - This function resets and re-initializes the card.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_reset_card(struct sdio_func *pfunction)
+{
+ int ret = 0;
+ int err;
+ struct mmc_card *card = pfunction->card;
+ struct mmc_host *host = card->host;
+ s32 bit = (fls(host->ocr_avail) - 1);
+ u8 cmd52_resp;
+ u32 clock, resp, i;
+ u16 rca;
+
+ /* Reset 9110 chip */
+ ret = rsi_cmd52writebyte(pfunction->card,
+ SDIO_CCCR_ABORT,
+ (1 << 3));
+
+	/* Card will not send any response as it is getting reset immediately,
+	 * hence expect a timeout status from the host controller.
+	 */
+ if (ret != -ETIMEDOUT)
+ rsi_dbg(ERR_ZONE, "%s: Reset failed : %d\n", __func__, ret);
+
+	/* Wait a few milliseconds to let any residual charge dissipate */
+ msleep(20);
+
+ /* Initialize the SDIO card */
+ host->ios.vdd = bit;
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ host->ios.power_mode = MMC_POWER_UP;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+ * This delay should be sufficient to allow the power supply
+ * to reach the minimum voltage.
+ */
+ msleep(20);
+
+ host->ios.clock = host->f_min;
+ host->ios.power_mode = MMC_POWER_ON;
+ host->ops->set_ios(host, &host->ios);
+
+ /*
+	 * This delay must be at least 74 clock cycles, or 1 ms, or the
+ * time required to reach a stable voltage.
+ */
+ msleep(20);
+
+ /* Issue CMD0. Goto idle state */
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ops->set_ios(host, &host->ios);
+ msleep(20);
+ err = rsi_issue_sdiocommand(pfunction,
+ MMC_GO_IDLE_STATE,
+ 0,
+ (MMC_RSP_NONE | MMC_CMD_BC),
+ NULL);
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ops->set_ios(host, &host->ios);
+ msleep(20);
+ host->use_spi_crc = 0;
+
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
+
+ if (!host->ocr_avail) {
+ /* Issue CMD5, arg = 0 */
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_IO_SEND_OP_COND,
+ 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR),
+ &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ host->ocr_avail = resp;
+ }
+
+ /* Issue CMD5, arg = ocr. Wait till card is ready */
+ for (i = 0; i < 100; i++) {
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_IO_SEND_OP_COND,
+ host->ocr_avail,
+ (MMC_RSP_R4 | MMC_CMD_BCR),
+ &resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ break;
+ }
+
+ if (resp & MMC_CARD_BUSY)
+ break;
+ msleep(20);
+ }
+
+ if ((i == 100) || (err)) {
+		rsi_dbg(ERR_ZONE, "%s: card is not ready : %d %d\n",
+ __func__, i, err);
+ return;
+ }
+
+ /* Issue CMD3, get RCA */
+ err = rsi_issue_sdiocommand(pfunction,
+ SD_SEND_RELATIVE_ADDR,
+ 0,
+ (MMC_RSP_R6 | MMC_CMD_BCR),
+ &resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD3 failed : %d\n", __func__, err);
+ return;
+ }
+ rca = resp >> 16;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ops->set_ios(host, &host->ios);
+
+ /* Issue CMD7, select card */
+ err = rsi_issue_sdiocommand(pfunction,
+ MMC_SELECT_CARD,
+ (rca << 16),
+ (MMC_RSP_R1 | MMC_CMD_AC),
+ NULL);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CMD7 failed : %d\n", __func__, err);
+ return;
+ }
+
+ /* Enable high speed */
+ if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
+ rsi_dbg(ERR_ZONE, "%s: Set high speed mode\n", __func__);
+ err = rsi_cmd52readbyte(card, SDIO_CCCR_SPEED, &cmd52_resp);
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n",
+ __func__, err);
+ card->state &= ~MMC_STATE_HIGHSPEED;
+ } else {
+ err = rsi_cmd52writebyte(card,
+ SDIO_CCCR_SPEED,
+ (cmd52_resp | SDIO_SPEED_EHS));
+ if (err) {
+ rsi_dbg(ERR_ZONE,
+					"%s: CCCR speed reg write failed %d\n",
+ __func__, err);
+ return;
+ }
+ mmc_card_set_highspeed(card);
+ host->ios.timing = MMC_TIMING_SD_HS;
+ host->ops->set_ios(host, &host->ios);
+ }
+ }
+
+ /* Set clock */
+ if (mmc_card_highspeed(card))
+ clock = 50000000;
+ else
+ clock = card->cis.max_dtr;
+
+ if (clock > host->f_max)
+ clock = host->f_max;
+
+ host->ios.clock = clock;
+ host->ops->set_ios(host, &host->ios);
+
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ /* CMD52: Set bus width & disable card detect resistor */
+ err = rsi_cmd52writebyte(card,
+ SDIO_CCCR_IF,
+ (SDIO_BUS_CD_DISABLE |
+ SDIO_BUS_WIDTH_4BIT));
+ if (err) {
+ rsi_dbg(ERR_ZONE, "%s: Set bus mode failed : %d\n",
+ __func__, err);
+ return;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_4;
+ host->ops->set_ios(host, &host->ios);
+ }
+}
+
+/**
+ * rsi_setclock() - This function sets the clock frequency.
+ * @adapter: Pointer to the adapter structure.
+ * @freq: Clock frequency.
+ *
+ * Return: None.
+ */
+static void rsi_setclock(struct rsi_hw *adapter, u32 freq)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ struct mmc_host *host = dev->pfunction->card->host;
+ u32 clock;
+
+ clock = freq * 1000;
+ if (clock > host->f_max)
+ clock = host->f_max;
+ host->ios.clock = clock;
+ host->ops->set_ios(host, &host->ios);
+}
+
+/**
+ * rsi_setblocklength() - This function sets the host block length.
+ * @adapter: Pointer to the adapter structure.
+ * @length: Block length to be set.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setblocklength(struct rsi_hw *adapter, u32 length)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+ rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__);
+
+ status = sdio_set_block_size(dev->pfunction, length);
+ dev->pfunction->max_blksize = 256;
+
+ rsi_dbg(INFO_ZONE,
+ "%s: Operational blk length is %d\n", __func__, length);
+ return status;
+}
+
+/**
+ * rsi_setupcard() - This function queries and sets the card's features.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setupcard(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status = 0;
+
+ rsi_setclock(adapter, 50000);
+
+ dev->tx_blk_size = 256;
+ status = rsi_setblocklength(adapter, dev->tx_blk_size);
+ if (status)
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set block length\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_read_register() - This function reads one byte of information
+ * from a register.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the variable that stores the value read.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_read_register(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 fun_num = 0;
+ int status;
+
+ sdio_claim_host(dev->pfunction);
+
+ if (fun_num == 0)
+ *data = sdio_f0_readb(dev->pfunction, addr, &status);
+ else
+ *data = sdio_readb(dev->pfunction, addr, &status);
+
+ sdio_release_host(dev->pfunction);
+
+ return status;
+}
+
+/**
+ * rsi_sdio_write_register() - This function writes one byte of information
+ * into a register.
+ * @adapter: Pointer to the adapter structure.
+ * @function: Function Number.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register(struct rsi_hw *adapter,
+ u8 function,
+ u32 addr,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status = 0;
+
+ sdio_claim_host(dev->pfunction);
+
+ if (function == 0)
+ sdio_f0_writeb(dev->pfunction, *data, addr, &status);
+ else
+ sdio_writeb(dev->pfunction, *data, addr, &status);
+
+ sdio_release_host(dev->pfunction);
+
+ return status;
+}
+
+/**
+ * rsi_sdio_ack_intr() - This function acks the interrupt received.
+ * @adapter: Pointer to the adapter structure.
+ * @int_bit: Interrupt bit to write into register.
+ *
+ * Return: None.
+ */
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit)
+{
+ int status;
+ status = rsi_sdio_write_register(adapter,
+ 1,
+ (SDIO_FUN1_INTR_CLR_REG |
+ RSI_SD_REQUEST_MASTER),
+ &int_bit);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: unable to send ack\n", __func__);
+}
+
+/**
+ * rsi_sdio_read_register_multiple() - This function reads multiple bytes of
+ * information from the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @count: Number of bytes to be read.
+ * @data: Pointer to the read data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u32 count,
+ u8 *data)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 status;
+
+ sdio_claim_host(dev->pfunction);
+
+ status = sdio_readsb(dev->pfunction, data, addr, count);
+
+ sdio_release_host(dev->pfunction);
+
+ if (status != 0)
+ rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_write_register_multiple() - This function writes multiple bytes of
+ * information to the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+
+ if (dev->write_fail > 1) {
+ rsi_dbg(ERR_ZONE, "%s: Stopping card writes\n", __func__);
+ return 0;
+ } else if (dev->write_fail == 1) {
+		/* Assuming it is a CRC failure, we want to allow another
+		 * card write.
+		 */
+ rsi_dbg(ERR_ZONE, "%s: Continue card writes\n", __func__);
+ dev->write_fail++;
+ }
+
+ sdio_claim_host(dev->pfunction);
+
+ status = sdio_writesb(dev->pfunction, addr, data, count);
+
+ sdio_release_host(dev->pfunction);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n",
+ __func__, status);
+ dev->write_fail = 2;
+ } else {
+ memcpy(dev->prev_desc, data, FRAME_DESC_SZ);
+ }
+ return status;
+}
+
+/**
+ * rsi_sdio_host_intf_write_pkt() - This function writes the packet to device.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the device.
+ * @len: length of the data to be written on to the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 len)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 block_size = dev->tx_blk_size;
+ u32 num_blocks, address, length;
+ u32 queueno;
+ int status;
+
+ queueno = ((pkt[1] >> 4) & 0xf);
+
+ num_blocks = len / block_size;
+
+ if (len % block_size)
+ num_blocks++;
+
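+	/* The write address encodes the block-aligned length with the
+	 * target queue number folded into bits 12-15.
+	 */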
+ address = (num_blocks * block_size | (queueno << 12));
+ length = num_blocks * block_size;
+
+ status = rsi_sdio_write_register_multiple(adapter,
+ address,
+ (u8 *)pkt,
+ length);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Unable to write onto the card: %d\n",
+ __func__, status);
+ rsi_dbg(DATA_TX_ZONE, "%s: Successfully written onto card\n", __func__);
+ return status;
+}
+
+/**
+ * rsi_sdio_host_intf_read_pkt() - This function reads the packet
+ *				   from the device.
+ * @adapter: Pointer to the adapter data structure.
+ * @pkt: Pointer to the packet data to be read from the device.
+ * @length: Length of the data to be read from the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 length)
+{
+ int status = -EINVAL;
+
+ if (!length) {
+ rsi_dbg(ERR_ZONE, "%s: Pkt size is zero\n", __func__);
+ return status;
+ }
+
+ status = rsi_sdio_read_register_multiple(adapter,
+ length,
+ length, /*num of bytes*/
+ (u8 *)pkt);
+
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed to read frame: %d\n", __func__,
+ status);
+ return status;
+}
+
+/**
+ * rsi_init_sdio_interface() - This function does the init specific to SDIO.
+ * @adapter: Pointer to the adapter data structure.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_sdio_interface(struct rsi_hw *adapter,
+ struct sdio_func *pfunction)
+{
+ struct rsi_91x_sdiodev *rsi_91x_dev;
+ int status = -ENOMEM;
+
+ rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
+ if (!rsi_91x_dev)
+ return status;
+
+ adapter->rsi_dev = rsi_91x_dev;
+
+ sdio_claim_host(pfunction);
+
+ pfunction->enable_timeout = 100;
+ status = sdio_enable_func(pfunction);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to enable interface\n", __func__);
+ sdio_release_host(pfunction);
+ return status;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+
+ rsi_91x_dev->pfunction = pfunction;
+ adapter->device = &pfunction->dev;
+
+ sdio_set_drvdata(pfunction, adapter);
+
+ status = rsi_setupcard(adapter);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to setup card\n", __func__);
+ goto fail;
+ }
+
+	rsi_dbg(INIT_ZONE, "%s: Setup card successfully\n", __func__);
+
+ status = rsi_init_sdio_slave_regs(adapter);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init slave regs\n", __func__);
+ goto fail;
+ }
+ sdio_release_host(pfunction);
+
+ adapter->host_intf_write_pkt = rsi_sdio_host_intf_write_pkt;
+ adapter->host_intf_read_pkt = rsi_sdio_host_intf_read_pkt;
+ adapter->determine_event_timeout = rsi_sdio_determine_event_timeout;
+ adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register;
+
+#ifdef CONFIG_RSI_DEBUGFS
+ adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
+#endif
+ return status;
+fail:
+ sdio_disable_func(pfunction);
+ sdio_release_host(pfunction);
+ return status;
+}
+
+/**
+ * rsi_probe() - This function is called by the kernel when a device matching
+ *		 the driver's vendor and device IDs is found. All the
+ *		 initialization work is done here.
+ * @pfunction: Pointer to the sdio_func structure.
+ * @id: Pointer to sdio_device_id structure.
+ *
+ * Return: 0 on success, 1 on failure.
+ */
+static int rsi_probe(struct sdio_func *pfunction,
+ const struct sdio_device_id *id)
+{
+ struct rsi_hw *adapter;
+
+ rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+ adapter = rsi_91x_init();
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+ __func__);
+ return 1;
+ }
+
+ if (rsi_init_sdio_interface(adapter, pfunction)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n",
+ __func__);
+ goto fail;
+ }
+
+ if (rsi_sdio_device_init(adapter->priv)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
+ sdio_claim_host(pfunction);
+ sdio_disable_func(pfunction);
+ sdio_release_host(pfunction);
+ goto fail;
+ }
+
+ sdio_claim_host(pfunction);
+ if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
+ sdio_release_host(pfunction);
+ goto fail;
+ }
+
+ sdio_release_host(pfunction);
+ rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);
+
+ return 0;
+fail:
+ rsi_91x_deinit(adapter);
+ rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+ return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: void.
+ */
+static void rsi_disconnect(struct sdio_func *pfunction)
+{
+	struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+	struct rsi_91x_sdiodev *dev;
+
+	if (!adapter)
+		return;
+
+	dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+	dev->write_fail = 2;
+ rsi_mac80211_detach(adapter);
+
+ sdio_claim_host(pfunction);
+ sdio_release_irq(pfunction);
+ sdio_disable_func(pfunction);
+ rsi_91x_deinit(adapter);
+	/* Reset the card to handle the case where the driver is reloaded */
+ rsi_reset_card(pfunction);
+ sdio_release_host(pfunction);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct device *dev)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static int rsi_resume(struct device *dev)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static const struct dev_pm_ops rsi_pm_ops = {
+ .suspend = rsi_suspend,
+ .resume = rsi_resume,
+};
+#endif
+
+static const struct sdio_device_id rsi_dev_table[] = {
+ { SDIO_DEVICE(0x303, 0x100) },
+ { SDIO_DEVICE(0x041B, 0x0301) },
+ { SDIO_DEVICE(0x041B, 0x0201) },
+ { SDIO_DEVICE(0x041B, 0x9330) },
+ { /* Blank */},
+};
+
+static struct sdio_driver rsi_driver = {
+ .name = "RSI-SDIO WLAN",
+ .probe = rsi_probe,
+ .remove = rsi_disconnect,
+ .id_table = rsi_dev_table,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &rsi_pm_ops,
+ }
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the sdio module.
+ * @void: Void.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_module_init(void)
+{
+ sdio_register_driver(&rsi_driver);
+ rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the sdio module.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+ sdio_unregister_driver(&rsi_driver);
+ rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(sdio, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
new file mode 100644
index 000000000000..f1cb99cafed8
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -0,0 +1,566 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_master_access_msword() - This function sets the AHB master access
+ * MS word in the SDIO slave registers.
+ * @adapter: Pointer to the adapter structure.
+ * @ms_word: MS word that needs to be initialized.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_master_access_msword(struct rsi_hw *adapter,
+ u16 ms_word)
+{
+ u8 byte;
+ u8 function = 0;
+ int status = 0;
+
+ byte = (u8)(ms_word & 0x00FF);
+
+ rsi_dbg(INIT_ZONE,
+ "%s: MASTER_ACCESS_MSBYTE:0x%x\n", __func__, byte);
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_MASTER_ACCESS_MSBYTE,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: fail to access MASTER_ACCESS_MSBYTE\n",
+ __func__);
+ return -1;
+ }
+
+ byte = (u8)(ms_word >> 8);
+
+ rsi_dbg(INIT_ZONE, "%s:MASTER_ACCESS_LSBYTE:0x%x\n", __func__, byte);
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_MASTER_ACCESS_LSBYTE,
+ &byte);
+ return status;
+}
+
+/**
+ * rsi_copy_to_card() - This function copies the TA firmware to the card by
+ *			writing the already-read firmware image out in blocks
+ *			of data.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware value to be written.
+ * @len: length of firmware file.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success and -1 on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+ const u8 *fw,
+ u32 len,
+ u32 num_blocks)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 indx, ii;
+ u32 block_size = dev->tx_blk_size;
+ u32 lsb_address;
+ __le32 data[] = { TA_HOLD_THREAD_VALUE, TA_SOFT_RST_CLR,
+ TA_PC_ZERO, TA_RELEASE_THREAD_VALUE };
+ u32 address[] = { TA_HOLD_THREAD_REG, TA_SOFT_RESET_REG,
+ TA_TH0_PC_REG, TA_RELEASE_THREAD_REG };
+ u32 base_address;
+ u16 msb_address;
+
+ base_address = TA_LOAD_ADDRESS;
+ msb_address = base_address >> 16;
+
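+	/* The firmware is written block by block; whenever the 16-bit
+	 * address window wraps, the AHB master-access MS word is advanced
+	 * so the following blocks land at the right address.
+	 */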
+ for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+ lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+ if (rsi_sdio_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -1;
+ }
+ rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+ base_address += block_size;
+ if ((base_address >> 16) != msb_address) {
+ msb_address += 1;
+ if (rsi_sdio_master_access_msword(adapter,
+ msb_address)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word reg\n",
+ __func__);
+ return -1;
+ }
+ }
+ }
+
+ if (len % block_size) {
+ lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+ if (rsi_sdio_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ len % block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load f/w\n", __func__);
+ return -1;
+ }
+ }
+ rsi_dbg(INIT_ZONE,
+		"%s: Successfully loaded TA instructions\n", __func__);
+
+ if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return -1;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(data); ii++) {
+ /* Bringing TA out of reset */
+ if (rsi_sdio_write_register_multiple(adapter,
+ (address[ii] |
+ RSI_SD_REQUEST_MASTER),
+ (u8 *)&data[ii],
+ 4)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to hold TA threads\n", __func__);
+ return -1;
+ }
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_load_ta_instructions() - This function loads the TA firmware: it
+ *				requests the TA firmware file, reads it and
+ *				writes its contents to the card in blocks of
+ *				data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u32 len;
+ u32 num_blocks;
+ const u8 *fw;
+ const struct firmware *fw_entry = NULL;
+ u32 block_size = dev->tx_blk_size;
+ int status = 0;
+ u32 base_address;
+ u16 msb_address;
+
+ if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word to common reg\n",
+ __func__);
+ return -1;
+ }
+ base_address = TA_LOAD_ADDRESS;
+ msb_address = (base_address >> 16);
+
+ if (rsi_sdio_master_access_msword(adapter, msb_address)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to set ms word reg\n", __func__);
+ return -1;
+ }
+
+ status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+ __func__, FIRMWARE_RSI9113);
+ return status;
+ }
+
+	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw) {
+		release_firmware(fw_entry);
+		return -ENOMEM;
+	}
+	len = fw_entry->size;
+
+ if (len % 4)
+ len += (4 - (len % 4));
+
+ num_blocks = (len / block_size);
+
+ rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+ rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
+	release_firmware(fw_entry);
+	return status;
+}
+
+/**
+ * rsi_process_pkt() - This Function reads rx_blocks register and figures out
+ * the size of the rx pkt.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_process_pkt(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ u8 num_blks = 0;
+ u32 rcv_pkt_len = 0;
+ int status = 0;
+
+ status = rsi_sdio_read_register(adapter,
+ SDIO_RX_NUM_BLOCKS_REG,
+ &num_blks);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+			"%s: Failed to read pkt length from the card\n",
+ __func__);
+ return status;
+ }
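+	/* The card reports the pending receive length as a number of
+	 * 256-byte blocks, hence the multiplication below.
+	 */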
+ rcv_pkt_len = (num_blks * 256);
+
+ common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL);
+ if (!common->rx_data_pkt) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
+ __func__);
+ return -1;
+ }
+
+ status = rsi_sdio_host_intf_read_pkt(adapter,
+ common->rx_data_pkt,
+ rcv_pkt_len);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n",
+ __func__);
+ goto fail;
+ }
+
+ status = rsi_read_pkt(common, rcv_pkt_len);
+ kfree(common->rx_data_pkt);
+ return status;
+
+fail:
+ kfree(common->rx_data_pkt);
+ return -1;
+}
+
+/**
+ * rsi_init_sdio_slave_regs() - This function does the actual initialization
+ *				of the SDIO slave registers.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 function = 0;
+ u8 byte;
+ int status = 0;
+
+ if (dev->next_read_delay) {
+ byte = dev->next_read_delay;
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_NXT_RD_DELAY2,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_NXT_RD_DELAY2\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ if (dev->sdio_high_speed_enable) {
+ rsi_dbg(INIT_ZONE, "%s: Enabling SDIO High speed\n", __func__);
+ byte = 0x3;
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_REG_HIGH_SPEED,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to enable SDIO high speed\n",
+ __func__);
+ return -1;
+ }
+ }
+
+ /* This tells SDIO FIFO when to start read to host */
+	rsi_dbg(INIT_ZONE, "%s: Initializing SDIO read start level\n", __func__);
+ byte = 0x24;
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_READ_START_LVL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_READ_START_LVL\n", __func__);
+ return -1;
+ }
+
+	rsi_dbg(INIT_ZONE, "%s: Initializing FIFO ctrl registers\n", __func__);
+ byte = (128 - 32);
+
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_READ_FIFO_CTL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_READ_FIFO_CTL\n", __func__);
+ return -1;
+ }
+
+ byte = 32;
+ status = rsi_sdio_write_register(adapter,
+ function,
+ SDIO_WRITE_FIFO_CTL,
+ &byte);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write SDIO_WRITE_FIFO_CTL\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * rsi_interrupt_handler() - This function reads and processes SDIO interrupts.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_interrupt_handler(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ int status;
+ enum sdio_interrupt_type isr_type;
+ u8 isr_status = 0;
+ u8 fw_status = 0;
+
+ dev->rx_info.sdio_int_counter++;
+
+ do {
+ mutex_lock(&common->tx_rxlock);
+ status = rsi_sdio_read_register(common->priv,
+ RSI_FN1_INT_REGISTER,
+ &isr_status);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to Read Intr Status Register\n",
+ __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+
+ if (isr_status == 0) {
+ rsi_set_event(&common->tx_thread.event);
+ dev->rx_info.sdio_intr_status_zero++;
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+
+ rsi_dbg(ISR_ZONE, "%s: Intr_status = %x %d %d\n",
+ __func__, isr_status, (1 << MSDU_PKT_PENDING),
+ (1 << FW_ASSERT_IND));
+
+ do {
+ RSI_GET_SDIO_INTERRUPT_TYPE(isr_status, isr_type);
+
+ switch (isr_type) {
+ case BUFFER_AVAILABLE:
+ dev->rx_info.watch_bufferfull_count = 0;
+ dev->rx_info.buffer_full = false;
+ dev->rx_info.mgmt_buffer_full = false;
+ rsi_sdio_ack_intr(common->priv,
+ (1 << PKT_BUFF_AVAILABLE));
+ rsi_set_event((&common->tx_thread.event));
+ rsi_dbg(ISR_ZONE,
+					"%s: ==> BUFFER_AVAILABLE <==\n",
+ __func__);
+ dev->rx_info.buf_avilable_counter++;
+ break;
+
+ case FIRMWARE_ASSERT_IND:
+ rsi_dbg(ERR_ZONE,
+ "%s: ==> FIRMWARE Assert <==\n",
+ __func__);
+ status = rsi_sdio_read_register(common->priv,
+ SDIO_FW_STATUS_REG,
+ &fw_status);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read f/w reg\n",
+ __func__);
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Firmware Status is 0x%x\n",
+ __func__ , fw_status);
+ rsi_sdio_ack_intr(common->priv,
+ (1 << FW_ASSERT_IND));
+ }
+
+ common->fsm_state = FSM_CARD_NOT_READY;
+ break;
+
+ case MSDU_PACKET_PENDING:
+ rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n");
+ dev->rx_info.total_sdio_msdu_pending_intr++;
+
+ status = rsi_process_pkt(common);
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read pkt\n",
+ __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+ break;
+ default:
+				rsi_sdio_ack_intr(common->priv, isr_status);
+				dev->rx_info.total_sdio_unknown_intr++;
+				rsi_dbg(ISR_ZONE,
+					"Unknown Interrupt %x\n",
+					isr_status);
+				isr_status = 0;
+ break;
+ }
+ isr_status ^= BIT(isr_type - 1);
+ } while (isr_status);
+ mutex_unlock(&common->tx_rxlock);
+ } while (1);
+}
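+
+/*
+ * Worked example for the handler above: an interrupt status of 0x09 has both
+ * PKT_BUFF_AVAILABLE (bit 0) and MSDU_PKT_PENDING (bit 3) set. The first pass
+ * of the inner loop services BUFFER_AVAILABLE and clears bit 0
+ * (0x09 ^ BIT(0) = 0x08), the second pass services MSDU_PACKET_PENDING and
+ * clears bit 3, leaving zero and ending the inner loop.
+ */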
+
+/**
+ * rsi_sdio_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_device_init(struct rsi_common *common)
+{
+ if (rsi_load_ta_instructions(common))
+ return -1;
+
+ if (rsi_sdio_master_access_msword(common->priv, MISC_CFG_BASE_ADDR)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n",
+ __func__);
+ return -1;
+ }
+ rsi_dbg(INIT_ZONE,
+ "%s: Setting ms word to 0x41050000\n", __func__);
+
+ return 0;
+}
+
+/**
+ * rsi_sdio_read_buffer_status_register() - This function reads the buffer
+ *					     status register and sets the
+ *					     relevant fields in the
+ *					     rsi_91x_sdiodev structure.
+ * @adapter: Pointer to the driver hw structure.
+ * @q_num: The queue number whose status is to be found.
+ *
+ * Return: status: -1 on failure or else queue full/stop is indicated.
+ */
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ u8 buf_status = 0;
+ int status = 0;
+
+ status = rsi_sdio_read_register(common->priv,
+ RSI_DEVICE_BUFFER_STATUS_REGISTER,
+ &buf_status);
+
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read status register\n", __func__);
+ return -1;
+ }
+
+ if (buf_status & (BIT(PKT_MGMT_BUFF_FULL))) {
+ if (!dev->rx_info.mgmt_buffer_full)
+ dev->rx_info.mgmt_buf_full_counter++;
+ dev->rx_info.mgmt_buffer_full = true;
+ } else {
+ dev->rx_info.mgmt_buffer_full = false;
+ }
+
+ if (buf_status & (BIT(PKT_BUFF_FULL))) {
+ if (!dev->rx_info.buffer_full)
+ dev->rx_info.buf_full_counter++;
+ dev->rx_info.buffer_full = true;
+ } else {
+ dev->rx_info.buffer_full = false;
+ }
+
+ if (buf_status & (BIT(PKT_BUFF_SEMI_FULL))) {
+ if (!dev->rx_info.semi_buffer_full)
+ dev->rx_info.buf_semi_full_counter++;
+ dev->rx_info.semi_buffer_full = true;
+ } else {
+ dev->rx_info.semi_buffer_full = false;
+ }
+
+ if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full))
+ return QUEUE_FULL;
+
+ if (dev->rx_info.buffer_full)
+ return QUEUE_FULL;
+
+ return QUEUE_NOT_FULL;
+}
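+
+/*
+ * Usage sketch (illustrative only; the actual caller is presumably wired up
+ * through the adapter's check_hw_queue_status callback elsewhere in the
+ * driver): a TX scheduler would typically consult this helper before
+ * dequeueing a frame for queue q_num, e.g.
+ *
+ *	if (rsi_sdio_read_buffer_status_register(adapter, q_num) == QUEUE_FULL)
+ *		continue;
+ *
+ * i.e. back off until a BUFFER_AVAILABLE interrupt clears the full flags.
+ */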
+
+/**
+ * rsi_sdio_determine_event_timeout() - This function determines the event
+ * timeout duration.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: timeout duration is returned.
+ */
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ /* Once buffer full is seen, event timeout to occur every 2 msecs */
+ if (dev->rx_info.buffer_full)
+ return 2;
+
+ return EVENT_WAIT_FOREVER;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
new file mode 100644
index 000000000000..bb1bf96670eb
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -0,0 +1,575 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_usb_card_write() - This function writes to the USB Card.
+ * @adapter: Pointer to the adapter structure.
+ * @buf: Pointer to the buffer from where the data has to be taken.
+ * @len: Length to be written.
+ * @endpoint: Type of endpoint.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_card_write(struct rsi_hw *adapter,
+ void *buf,
+ u16 len,
+ u8 endpoint)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ int status;
+ s32 transfer;
+
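+	/* endpoint is 1-based (MGMT_EP/DATA_EP) while bulkout_endpoint_addr[]
+	 * is indexed from zero, hence the "endpoint - 1" below.
+	 */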
+ status = usb_bulk_msg(dev->usbdev,
+ usb_sndbulkpipe(dev->usbdev,
+ dev->bulkout_endpoint_addr[endpoint - 1]),
+ buf,
+ len,
+ &transfer,
+ HZ * 5);
+
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Card write failed with error code :%10d\n", status);
+ dev->write_fail = 1;
+ }
+ return status;
+}
+
+/**
+ * rsi_write_multiple() - This function writes multiple bytes of information
+ * to the USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @endpoint: USB endpoint on which the data has to be sent.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_write_multiple(struct rsi_hw *adapter,
+ u8 endpoint,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u8 *seg = dev->tx_buffer;
+
+ if (dev->write_fail)
+ return 0;
+
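+	/*
+	 * Management frames are copied in behind RSI_USB_TX_HEAD_ROOM bytes of
+	 * zeroed headroom in the preallocated tx_buffer; data frames are
+	 * expected to have been built with that headroom already, so the
+	 * buffer pointer is simply rewound instead of copying.
+	 */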
+ if (endpoint == MGMT_EP) {
+ memset(seg, 0, RSI_USB_TX_HEAD_ROOM);
+ memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count);
+ } else {
+ seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM);
+ }
+
+ return rsi_usb_card_write(adapter,
+ seg,
+ count + RSI_USB_TX_HEAD_ROOM,
+ endpoint);
+}
+
+/**
+ * rsi_find_bulk_in_and_out_endpoints() - This function finds the bulk in and
+ *					  out endpoints of the device.
+ * @interface: Pointer to the USB interface structure.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -EINVAL on failure.
+ */
+static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
+ struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ __le16 buffer_size;
+ int ii, bep_found = 0;
+
+ iface_desc = &(interface->altsetting[0]);
+
+ for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
+ endpoint = &(iface_desc->endpoint[ii].desc);
+
+ if ((!(dev->bulkin_endpoint_addr)) &&
+ (endpoint->bEndpointAddress & USB_DIR_IN) &&
+ ((endpoint->bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ buffer_size = endpoint->wMaxPacketSize;
+ dev->bulkin_size = buffer_size;
+ dev->bulkin_endpoint_addr =
+ endpoint->bEndpointAddress;
+ }
+
+ if (!dev->bulkout_endpoint_addr[bep_found] &&
+ !(endpoint->bEndpointAddress & USB_DIR_IN) &&
+ ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK)) {
+ dev->bulkout_endpoint_addr[bep_found] =
+ endpoint->bEndpointAddress;
+ buffer_size = endpoint->wMaxPacketSize;
+ dev->bulkout_size[bep_found] = buffer_size;
+ bep_found++;
+ }
+
+ if (bep_found >= MAX_BULK_EP)
+ break;
+ }
+
+	if (!(dev->bulkin_endpoint_addr) ||
+	    !(dev->bulkout_endpoint_addr[0]))
+		return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * rsi_usb_reg_read() - This function reads data from the given register
+ *			address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register to be read.
+ * @value: Pointer to the variable where the read value is stored.
+ * @len: Length of the data to be read.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_reg_read(struct usb_device *usbdev,
+ u32 reg,
+ u16 *value,
+ u16 len)
+{
+ u8 temp_buf[4];
+ int status = 0;
+
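+	/* The 32-bit register address is split across the vendor request:
+	 * the upper 16 bits travel in wValue and the lower 16 bits in wIndex.
+	 */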
+ status = usb_control_msg(usbdev,
+ usb_rcvctrlpipe(usbdev, 0),
+ USB_VENDOR_REGISTER_READ,
+ USB_TYPE_VENDOR,
+ ((reg & 0xffff0000) >> 16), (reg & 0xffff),
+ (void *)temp_buf,
+ len,
+ HZ * 5);
+
+ *value = (temp_buf[0] | (temp_buf[1] << 8));
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Reg read failed with error code :%d\n",
+ __func__, status);
+ }
+ return status;
+}
+
+/**
+ * rsi_usb_reg_write() - This function writes the given data into the given
+ * register address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register.
+ * @value: Value to write.
+ * @len: Length of data to be written.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_reg_write(struct usb_device *usbdev,
+ u32 reg,
+ u16 value,
+ u16 len)
+{
+ u8 usb_reg_buf[4];
+ int status = 0;
+
+ usb_reg_buf[0] = (value & 0x00ff);
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = 0x0;
+ usb_reg_buf[3] = 0x0;
+
+ status = usb_control_msg(usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ USB_VENDOR_REGISTER_WRITE,
+ USB_TYPE_VENDOR,
+ ((reg & 0xffff0000) >> 16),
+ (reg & 0xffff),
+ (void *)usb_reg_buf,
+ len,
+ HZ * 5);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Reg write failed with error code :%d\n",
+ __func__, status);
+ }
+ return status;
+}
+
+/**
+ * rsi_rx_done_handler() - This function is called when a packet is received
+ *			   from the USB stack. This is the receive-done callback.
+ * @urb: Received URB.
+ *
+ * Return: None.
+ */
+static void rsi_rx_done_handler(struct urb *urb)
+{
+ struct rsi_hw *adapter = urb->context;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+ if (urb->status)
+ return;
+
+ rsi_set_event(&dev->rx_thread.event);
+}
+
+/**
+ * rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_rx_urb_submit(struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct urb *urb = dev->rx_usb_urb[0];
+ int status;
+
+ usb_fill_bulk_urb(urb,
+ dev->usbdev,
+ usb_rcvbulkpipe(dev->usbdev,
+ dev->bulkin_endpoint_addr),
+ urb->transfer_buffer,
+ 3000,
+ rsi_rx_done_handler,
+ adapter);
+
+ status = usb_submit_urb(urb, GFP_KERNEL);
+ if (status)
+ rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+
+ return status;
+}
+
+/**
+ * rsi_usb_write_register_multiple() - This function writes multiple bytes of
+ *				       data starting at the given register address.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of bytes to be written.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
+ u32 addr,
+ u8 *data,
+ u32 count)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u8 *buf;
+	u32 transfer;
+ int status = 0;
+
+ buf = kzalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (count) {
+		transfer = min_t(u32, count, 4096);
+ memcpy(buf, data, transfer);
+ status = usb_control_msg(dev->usbdev,
+ usb_sndctrlpipe(dev->usbdev, 0),
+ USB_VENDOR_REGISTER_WRITE,
+ USB_TYPE_VENDOR,
+ ((addr & 0xffff0000) >> 16),
+ (addr & 0xffff),
+ (void *)buf,
+ transfer,
+ HZ * 5);
+		if (status < 0) {
+			rsi_dbg(ERR_ZONE,
+				"Reg write failed with error code :%d\n",
+				status);
+			kfree(buf);
+			return status;
+		}
+		count -= transfer;
+		data += transfer;
+		addr += transfer;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+/**
+ * rsi_usb_host_intf_write_pkt() - This function writes the packet to the
+ * USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the card.
+ * @len: Length of the data to be written on to the card.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
+ u8 *pkt,
+ u32 len)
+{
+ u32 queueno = ((pkt[1] >> 4) & 0xf);
+ u8 endpoint;
+
+ endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP);
+
+ return rsi_write_multiple(adapter,
+ endpoint,
+ (u8 *)pkt,
+ len);
+}
+
+/**
+ * rsi_deinit_usb_interface() - This function deinitializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
+{
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+ rsi_kill_thread(&dev->rx_thread);
+ kfree(adapter->priv->rx_data_pkt);
+ kfree(dev->tx_buffer);
+}
+
+/**
+ * rsi_init_usb_interface() - This function initializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ * @pfunction: Pointer to USB interface structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_usb_interface(struct rsi_hw *adapter,
+ struct usb_interface *pfunction)
+{
+ struct rsi_91x_usbdev *rsi_dev;
+ struct rsi_common *common = adapter->priv;
+ int status;
+
+ rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL);
+ if (!rsi_dev)
+ return -ENOMEM;
+
+ adapter->rsi_dev = rsi_dev;
+ rsi_dev->usbdev = interface_to_usbdev(pfunction);
+
+ if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter))
+ return -EINVAL;
+
+ adapter->device = &pfunction->dev;
+ usb_set_intfdata(pfunction, adapter);
+
+	/* Must cover the maximum URB transfer length used in rsi_rx_urb_submit() */
+	common->rx_data_pkt = kmalloc(3000, GFP_KERNEL);
+ if (!common->rx_data_pkt) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+	rsi_dev->tx_buffer = kmalloc(2048, GFP_KERNEL);
+	rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
+	if (!rsi_dev->tx_buffer || !rsi_dev->rx_usb_urb[0]) {
+		status = -ENOMEM;
+		goto fail;
+	}
+	rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
+	rsi_dev->tx_blk_size = 252;
+
+ /* Initializing function callbacks */
+ adapter->rx_urb_submit = rsi_rx_urb_submit;
+ adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
+ adapter->check_hw_queue_status = rsi_usb_check_queue_status;
+ adapter->determine_event_timeout = rsi_usb_event_timeout;
+
+ rsi_init_event(&rsi_dev->rx_thread.event);
+ status = rsi_create_kthread(common, &rsi_dev->rx_thread,
+ rsi_usb_rx_thread, "RX-Thread");
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
+ goto fail;
+ }
+
+#ifdef CONFIG_RSI_DEBUGFS
+	/* The USB interface requires one less than MAX_DEBUGFS_ENTRIES entries */
+ adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1);
+#endif
+
+ rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+ return 0;
+
+fail:
+	usb_free_urb(rsi_dev->rx_usb_urb[0]);
+	kfree(rsi_dev->tx_buffer);
+	kfree(common->rx_data_pkt);
+	return status;
+}
+
+/**
+ * rsi_probe() - This function is called by kernel when the driver provided
+ * Vendor and device IDs are matched. All the initialization
+ * work is done here.
+ * @pfunction: Pointer to the USB interface structure.
+ * @id: Pointer to the usb_device_id structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_probe(struct usb_interface *pfunction,
+ const struct usb_device_id *id)
+{
+ struct rsi_hw *adapter;
+ struct rsi_91x_usbdev *dev;
+ u16 fw_status;
+
+ rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+ adapter = rsi_91x_init();
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+ __func__);
+ return 1;
+ }
+
+ if (rsi_init_usb_interface(adapter, pfunction)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
+ __func__);
+ goto err;
+ }
+
+ rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
+
+ dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+	if (rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2) < 0)
+		goto err1;
+
+	fw_status &= 1;
+
+ if (!fw_status) {
+ if (rsi_usb_device_init(adapter->priv)) {
+ rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
+ __func__);
+ goto err1;
+ }
+
+ if (rsi_usb_reg_write(dev->usbdev,
+ USB_INTERNAL_REG_1,
+ RSI_USB_READY_MAGIC_NUM, 1) < 0)
+ goto err1;
+ rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
+ }
+
+ if (rsi_rx_urb_submit(adapter))
+ goto err1;
+
+ return 0;
+err1:
+ rsi_deinit_usb_interface(adapter);
+err:
+ rsi_91x_deinit(adapter);
+ rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+ return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function,
+ *		      it deinitializes the driver structures.
+ * @pfunction: Pointer to the USB interface structure.
+ *
+ * Return: None.
+ */
+static void rsi_disconnect(struct usb_interface *pfunction)
+{
+ struct rsi_hw *adapter = usb_get_intfdata(pfunction);
+
+ if (!adapter)
+ return;
+
+ rsi_mac80211_detach(adapter);
+ rsi_deinit_usb_interface(adapter);
+ rsi_91x_deinit(adapter);
+
+ rsi_dbg(INFO_ZONE, "%s: Deinitialization completed\n", __func__);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+
+static int rsi_resume(struct usb_interface *intf)
+{
+ /* Not yet implemented */
+ return -ENOSYS;
+}
+#endif
+
+static const struct usb_device_id rsi_dev_table[] = {
+ { USB_DEVICE(0x0303, 0x0100) },
+ { USB_DEVICE(0x041B, 0x0301) },
+ { USB_DEVICE(0x041B, 0x0201) },
+ { USB_DEVICE(0x041B, 0x9330) },
+ { /* Blank */},
+};
+
+static struct usb_driver rsi_driver = {
+ .name = "RSI-USB WLAN",
+ .probe = rsi_probe,
+ .disconnect = rsi_disconnect,
+ .id_table = rsi_dev_table,
+#ifdef CONFIG_PM
+ .suspend = rsi_suspend,
+ .resume = rsi_resume,
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the client driver.
+ * @void: Void.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int rsi_module_init(void)
+{
+	int status = usb_register(&rsi_driver);
+
+	rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+	return status;
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the client driver.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+ usb_deregister(&rsi_driver);
+ rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common USB layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(usb, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
new file mode 100644
index 000000000000..1106ce76707e
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -0,0 +1,177 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_copy_to_card() - This function copies the TA firmware to the card,
+ *			writing it out in blocks of data followed by any
+ *			remaining partial block.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware value to be written.
+ * @len: Length of the firmware image.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success and -1 on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+ const u8 *fw,
+ u32 len,
+ u32 num_blocks)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ u32 indx, ii;
+ u32 block_size = dev->tx_blk_size;
+ u32 lsb_address;
+ u32 base_address;
+
+ base_address = TA_LOAD_ADDRESS;
+
+ for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+ lsb_address = base_address;
+ if (rsi_usb_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -EIO;
+ }
+ rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+ base_address += block_size;
+ }
+
+ if (len % block_size) {
+ lsb_address = base_address;
+ if (rsi_usb_write_register_multiple(adapter,
+ lsb_address,
+ (u8 *)(fw + indx),
+ len % block_size)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n", __func__,
+ FIRMWARE_RSI9113);
+ return -EIO;
+ }
+ }
+ rsi_dbg(INIT_ZONE,
+		"%s: Successfully loaded %s instructions\n", __func__,
+ FIRMWARE_RSI9113);
+
+ rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+ return 0;
+}
+
+/**
+ * rsi_usb_rx_thread() - This is a kernel thread to receive the packets from
+ * the USB device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_usb_rx_thread(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ int status;
+
+ do {
+ rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER);
+
+ if (atomic_read(&dev->rx_thread.thread_done))
+ goto out;
+
+ mutex_lock(&common->tx_rxlock);
+ status = rsi_read_pkt(common, 0);
+ if (status) {
+ rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__);
+ mutex_unlock(&common->tx_rxlock);
+ return;
+ }
+ mutex_unlock(&common->tx_rxlock);
+ rsi_reset_event(&dev->rx_thread.event);
+ if (adapter->rx_urb_submit(adapter)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed in urb submission", __func__);
+ return;
+ }
+ } while (1);
+
+out:
+ rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
+ complete_and_exit(&dev->rx_thread.completion, 0);
+}
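+
+/*
+ * Receive flow note: rsi_rx_done_handler() only posts rx_thread.event from
+ * URB completion context; this thread then parses the packet via
+ * rsi_read_pkt(), re-arms the event and resubmits the single receive URB, so
+ * at most one receive URB is in flight at a time.
+ */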
+
+/**
+ * rsi_load_ta_instructions() - This function loads the TA firmware. It
+ *				requests the firmware file, reads it and
+ *				writes its contents to the card in blocks
+ *				of data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ const struct firmware *fw_entry = NULL;
+ u32 block_size = dev->tx_blk_size;
+ const u8 *fw;
+ u32 num_blocks, len;
+ int status = 0;
+
+ status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+ __func__, FIRMWARE_RSI9113);
+ return status;
+ }
+
+	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+	if (!fw) {
+		release_firmware(fw_entry);
+		return -ENOMEM;
+	}
+	len = fw_entry->size;
+
+	if (len % 4)
+		len += (4 - (len % 4));
+
+	num_blocks = (len / block_size);
+
+ rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+ rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+	status = rsi_copy_to_card(common, fw, len, num_blocks);
+	kfree(fw);
+	release_firmware(fw_entry);
+ return status;
+}
+
+/**
+ * rsi_usb_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_usb_device_init(struct rsi_common *common)
+{
+ if (rsi_load_ta_instructions(common))
+ return -EIO;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
new file mode 100644
index 000000000000..5e2721f7909c
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -0,0 +1,126 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_BOOTPARAMS_HEADER_H__
+#define __RSI_BOOTPARAMS_HEADER_H__
+
+#define CRYSTAL_GOOD_TIME BIT(0)
+#define BOOTUP_MODE_INFO BIT(1)
+#define WIFI_TAPLL_CONFIGS BIT(5)
+#define WIFI_PLL960_CONFIGS BIT(6)
+#define WIFI_AFEPLL_CONFIGS BIT(7)
+#define WIFI_SWITCH_CLK_CONFIGS BIT(8)
+
+#define TA_PLL_M_VAL_20 8
+#define TA_PLL_N_VAL_20 1
+#define TA_PLL_P_VAL_20 4
+
+#define PLL960_M_VAL_20 0x14
+#define PLL960_N_VAL_20 0
+#define PLL960_P_VAL_20 5
+
+#define UMAC_CLK_40MHZ 40
+
+#define TA_PLL_M_VAL_40 46
+#define TA_PLL_N_VAL_40 3
+#define TA_PLL_P_VAL_40 3
+
+#define PLL960_M_VAL_40 0x14
+#define PLL960_N_VAL_40 0
+#define PLL960_P_VAL_40 5
+
+#define UMAC_CLK_20BW \
+ (((TA_PLL_M_VAL_20 + 1) * 40) / \
+ ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
+#define VALID_20 \
+ (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS)
+#define UMAC_CLK_40BW \
+ (((TA_PLL_M_VAL_40 + 1) * 40) / \
+ ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))
+#define VALID_40 \
+ (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS | \
+ WIFI_TAPLL_CONFIGS | CRYSTAL_GOOD_TIME | BOOTUP_MODE_INFO)
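+
+/*
+ * Worked example: with the 20 MHz TA PLL values above, UMAC_CLK_20BW
+ * evaluates to ((8 + 1) * 40) / ((1 + 1) * (4 + 1)) = 360 / 10 = 36 (MHz).
+ */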
+
+/* structure to store configs related to TAPLL programming */
+struct tapll_info {
+ __le16 pll_reg_1;
+ __le16 pll_reg_2;
+} __packed;
+
+/* structure to store configs related to PLL960 programming */
+struct pll960_info {
+ __le16 pll_reg_1;
+ __le16 pll_reg_2;
+ __le16 pll_reg_3;
+} __packed;
+
+/* structure to store configs related to AFEPLL programming */
+struct afepll_info {
+ __le16 pll_reg;
+} __packed;
+
+/* structure to store configs related to pll configs */
+struct pll_config {
+ struct tapll_info tapll_info_g;
+ struct pll960_info pll960_info_g;
+ struct afepll_info afepll_info_g;
+} __packed;
+
+/* structure to store configs related to UMAC clk programming */
+struct switch_clk {
+ __le16 switch_clk_info;
+ /* If switch_bbp_lmac_clk_reg is set then this value will be programmed
+ * into reg
+ */
+ __le16 bbp_lmac_clk_reg_val;
+ /* if switch_umac_clk is set then this value will be programmed */
+ __le16 umac_clock_reg_config;
+ /* if switch_qspi_clk is set then this value will be programmed */
+ __le16 qspi_uart_clock_reg_config;
+} __packed;
+
+struct device_clk_info {
+ struct pll_config pll_config_g;
+ struct switch_clk switch_clk_g;
+} __packed;
+
+struct bootup_params {
+ __le16 magic_number;
+ __le16 crystal_good_time;
+ __le32 valid;
+ __le32 reserved_for_valids;
+ __le16 bootup_mode_info;
+ /* configuration used for digital loop back */
+ __le16 digital_loop_back_params;
+ __le16 rtls_timestamp_en;
+ __le16 host_spi_intr_cfg;
+ struct device_clk_info device_clk_info[3];
+ /* ulp buckboost wait time */
+ __le32 buckboost_wakeup_cnt;
+ /* pmu wakeup wait time & WDT EN info */
+ __le16 pmu_wakeup_wait;
+ u8 shutdown_wait_time;
+ /* Sleep clock source selection */
+ u8 pmu_slp_clkout_sel;
+ /* WDT programming values */
+ __le32 wdt_prog_value;
+ /* WDT soc reset delay */
+ __le32 wdt_soc_rst_delay;
+ /* dcdc modes configs */
+ __le32 dcdc_operation_mode;
+ __le32 soc_reset_wait_cnt;
+} __packed;
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
new file mode 100644
index 000000000000..f2f70784d4ad
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_COMMON_H__
+#define __RSI_COMMON_H__
+
+#include <linux/kthread.h>
+
+#define EVENT_WAIT_FOREVER 0
+#define TA_LOAD_ADDRESS 0x00
+#define FIRMWARE_RSI9113 "rsi_91x.fw"
+#define QUEUE_NOT_FULL 1
+#define QUEUE_FULL 0
+
+static inline int rsi_init_event(struct rsi_event *pevent)
+{
+ atomic_set(&pevent->event_condition, 1);
+ init_waitqueue_head(&pevent->event_queue);
+ return 0;
+}
+
+static inline int rsi_wait_event(struct rsi_event *event, u32 timeout)
+{
+ int status = 0;
+
+ if (!timeout)
+ status = wait_event_interruptible(event->event_queue,
+ (atomic_read(&event->event_condition) == 0));
+ else
+ status = wait_event_interruptible_timeout(event->event_queue,
+ (atomic_read(&event->event_condition) == 0),
+ timeout);
+ return status;
+}
+
+static inline void rsi_set_event(struct rsi_event *event)
+{
+ atomic_set(&event->event_condition, 0);
+ wake_up_interruptible(&event->event_queue);
+}
+
+static inline void rsi_reset_event(struct rsi_event *event)
+{
+ atomic_set(&event->event_condition, 1);
+}
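+
+/*
+ * Typical usage (sketch): a worker thread blocks until an event is posted and
+ * then re-arms it, e.g.
+ *
+ *	rsi_wait_event(&thread->event, EVENT_WAIT_FOREVER);
+ *	rsi_reset_event(&thread->event);
+ *
+ * Passing EVENT_WAIT_FOREVER (0) selects the non-timeout wait above.
+ */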
+
+static inline int rsi_create_kthread(struct rsi_common *common,
+ struct rsi_thread *thread,
+ void *func_ptr,
+ u8 *name)
+{
+ init_completion(&thread->completion);
+ thread->task = kthread_run(func_ptr, common, name);
+ if (IS_ERR(thread->task))
+ return (int)PTR_ERR(thread->task);
+
+ return 0;
+}
+
+static inline int rsi_kill_thread(struct rsi_thread *handle)
+{
+ atomic_inc(&handle->thread_done);
+ rsi_set_event(&handle->event);
+
+ wait_for_completion(&handle->completion);
+ return kthread_stop(handle->task);
+}
+
+void rsi_mac80211_detach(struct rsi_hw *hw);
+u16 rsi_get_connected_channel(struct rsi_hw *adapter);
+struct rsi_hw *rsi_91x_init(void);
+void rsi_91x_deinit(struct rsi_hw *adapter);
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
new file mode 100644
index 000000000000..580ad3b3f710
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_DEBUGFS_H__
+#define __RSI_DEBUGFS_H__
+
+#include "rsi_main.h"
+#include <linux/debugfs.h>
+
+#ifndef CONFIG_RSI_DEBUGFS
+static inline int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+ return 0;
+}
+
+static inline void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+ return;
+}
+#else
+struct rsi_dbg_files {
+ const char *name;
+ umode_t perms;
+ const struct file_operations fops;
+};
+
+struct rsi_debugfs {
+ struct dentry *subdir;
+ struct rsi_dbg_ops *dfs_get_ops;
+ struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
+};
+int rsi_init_dbgfs(struct rsi_hw *adapter);
+void rsi_remove_dbgfs(struct rsi_hw *adapter);
+#endif
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
new file mode 100644
index 000000000000..2cb73e7edb98
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -0,0 +1,218 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MAIN_H__
+#define __RSI_MAIN_H__
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#define ERR_ZONE BIT(0) /* For Error Msgs */
+#define INFO_ZONE BIT(1) /* For General Status Msgs */
+#define INIT_ZONE BIT(2) /* For Driver Init Seq Msgs */
+#define MGMT_TX_ZONE BIT(3) /* For TX Mgmt Path Msgs */
+#define MGMT_RX_ZONE BIT(4) /* For RX Mgmt Path Msgs */
+#define DATA_TX_ZONE BIT(5) /* For TX Data Path Msgs */
+#define DATA_RX_ZONE BIT(6) /* For RX Data Path Msgs */
+#define FSM_ZONE BIT(7) /* For State Machine Msgs */
+#define ISR_ZONE BIT(8) /* For Interrupt Msgs */
+
+#define FSM_CARD_NOT_READY 0
+#define FSM_BOOT_PARAMS_SENT 1
+#define FSM_EEPROM_READ_MAC_ADDR 2
+#define FSM_RESET_MAC_SENT 3
+#define FSM_RADIO_CAPS_SENT 4
+#define FSM_BB_RF_PROG_SENT 5
+#define FSM_MAC_INIT_DONE 6
+
+extern u32 rsi_zone_enabled;
+extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+
+#define RSI_MAX_VIFS 1
+#define NUM_EDCA_QUEUES 4
+#define IEEE80211_ADDR_LEN 6
+#define FRAME_DESC_SZ 16
+#define MIN_802_11_HDR_LEN 24
+
+#define DATA_QUEUE_WATER_MARK 400
+#define MIN_DATA_QUEUE_WATER_MARK 300
+#define MULTICAST_WATER_MARK 200
+#define MAC_80211_HDR_FRAME_CONTROL 0
+#define WME_NUM_AC 4
+#define NUM_SOFT_QUEUES 5
+#define MAX_HW_QUEUES 8
+#define INVALID_QUEUE 0xff
+#define MAX_CONTINUOUS_VO_PKTS 8
+#define MAX_CONTINUOUS_VI_PKTS 4
+
+/* Queue information */
+#define RSI_WIFI_MGMT_Q 0x4
+#define RSI_WIFI_DATA_Q 0x5
+#define IEEE80211_MGMT_FRAME 0x00
+#define IEEE80211_CTL_FRAME 0x04
+
+#define IEEE80211_QOS_TID 0x0f
+#define IEEE80211_NONQOS_TID 16
+
+#define MAX_DEBUGFS_ENTRIES 4
+
+#define TID_TO_WME_AC(_tid) ( \
+ ((_tid) == 0 || (_tid) == 3) ? BE_Q : \
+ ((_tid) < 3) ? BK_Q : \
+ ((_tid) < 6) ? VI_Q : \
+ VO_Q)
+
+#define WME_AC(_q) ( \
+ ((_q) == BK_Q) ? IEEE80211_AC_BK : \
+ ((_q) == BE_Q) ? IEEE80211_AC_BE : \
+ ((_q) == VI_Q) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
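+
+/*
+ * TID_TO_WME_AC() therefore maps TIDs 0 and 3 to BE_Q, TIDs 1-2 to BK_Q,
+ * TIDs 4-5 to VI_Q and TIDs 6-7 to VO_Q, matching the usual 802.11e
+ * UP-to-AC mapping.
+ */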
+
+struct version_info {
+ u16 major;
+ u16 minor;
+ u16 release_num;
+ u16 patch_num;
+} __packed;
+
+struct skb_info {
+ s8 rssi;
+ u32 flags;
+ u16 channel;
+ s8 tid;
+ s8 sta_id;
+};
+
+enum edca_queue {
+ BK_Q,
+ BE_Q,
+ VI_Q,
+ VO_Q,
+ MGMT_SOFT_Q
+};
+
+struct security_info {
+ bool security_enable;
+ u32 ptk_cipher;
+ u32 gtk_cipher;
+};
+
+struct wmm_qinfo {
+ s32 weight;
+ s32 wme_params;
+ s32 pkt_contended;
+};
+
+struct transmit_q_stats {
+ u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1];
+ u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1];
+};
+
+struct vif_priv {
+ bool is_ht;
+ bool sgi;
+ u16 seq_start;
+};
+
+struct rsi_event {
+ atomic_t event_condition;
+ wait_queue_head_t event_queue;
+};
+
+struct rsi_thread {
+ void (*thread_function)(void *);
+ struct completion completion;
+ struct task_struct *task;
+ struct rsi_event event;
+ atomic_t thread_done;
+};
+
+struct rsi_hw;
+
+struct rsi_common {
+ struct rsi_hw *priv;
+ struct vif_priv vif_info[RSI_MAX_VIFS];
+
+ bool mgmt_q_block;
+ struct version_info driver_ver;
+ struct version_info fw_ver;
+
+ struct rsi_thread tx_thread;
+ struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1];
+ /* Mutex declaration */
+ struct mutex mutex;
+ /* Mutex used between tx/rx threads */
+ struct mutex tx_rxlock;
+ u8 endpoint;
+
+ /* Channel/band related */
+ u8 band;
+ u8 channel_width;
+
+ u16 rts_threshold;
+ u16 bitrate_mask[2];
+ u32 fixedrate_mask[2];
+
+ u8 rf_reset;
+ struct transmit_q_stats tx_stats;
+ struct security_info secinfo;
+ struct wmm_qinfo tx_qinfo[NUM_EDCA_QUEUES];
+ struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+ u8 mac_addr[IEEE80211_ADDR_LEN];
+
+ /* state related */
+ u32 fsm_state;
+ bool init_done;
+ u8 bb_rf_prog_count;
+ bool iface_down;
+
+ /* Generic */
+ u8 channel;
+ u8 *rx_data_pkt;
+ u8 mac_id;
+ u8 radio_id;
+ u16 rate_pwr[20];
+ u16 min_rate;
+
+ /* WMM algo related */
+ u8 selected_qnum;
+ u32 pkt_cnt;
+ u8 min_weight;
+};
+
+struct rsi_hw {
+ struct rsi_common *priv;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vifs[RSI_MAX_VIFS];
+ struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+
+ struct device *device;
+ u8 sc_nvifs;
+
+#ifdef CONFIG_RSI_DEBUGFS
+ struct rsi_debugfs *dfsentry;
+ u8 num_debugfs_entries;
+#endif
+ void *rsi_dev;
+ int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+ int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num);
+ int (*rx_urb_submit)(struct rsi_hw *adapter);
+ int (*determine_event_timeout)(struct rsi_hw *adapter);
+};
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
new file mode 100644
index 000000000000..ac67c4ad63c2
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -0,0 +1,285 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MGMT_H__
+#define __RSI_MGMT_H__
+
+#include <linux/sort.h>
+#include "rsi_boot_params.h"
+#include "rsi_main.h"
+
+#define MAX_MGMT_PKT_SIZE 512
+#define RSI_NEEDED_HEADROOM 80
+#define RSI_RCV_BUFFER_LEN 2000
+
+#define RSI_11B_MODE 0
+#define RSI_11G_MODE BIT(7)
+#define RETRY_COUNT 8
+#define RETRY_LONG 4
+#define RETRY_SHORT 7
+#define WMM_SHORT_SLOT_TIME 9
+#define SIFS_DURATION 16
+
+#define KEY_TYPE_CLEAR 0
+#define RSI_PAIRWISE_KEY 1
+#define RSI_GROUP_KEY 2
+
+/* EEPROM_READ_ADDRESS */
+#define WLAN_MAC_EEPROM_ADDR 40
+#define WLAN_MAC_MAGIC_WORD_LEN 0x01
+#define WLAN_HOST_MODE_LEN 0x04
+#define WLAN_FW_VERSION_LEN 0x08
+#define MAGIC_WORD 0x5A
+
+/* Receive Frame Types */
+#define TA_CONFIRM_TYPE 0x01
+#define RX_DOT11_MGMT 0x02
+#define TX_STATUS_IND 0x04
+#define PROBEREQ_CONFIRM 2
+#define CARD_READY_IND 0x00
+
+#define RSI_DELETE_PEER 0x0
+#define RSI_ADD_PEER 0x1
+#define START_AMPDU_AGGR 0x1
+#define STOP_AMPDU_AGGR 0x0
+#define INTERNAL_MGMT_PKT 0x99
+
+#define PUT_BBP_RESET 0
+#define BBP_REG_WRITE 0
+#define RF_RESET_ENABLE BIT(3)
+#define RATE_INFO_ENABLE BIT(0)
+#define RSI_BROADCAST_PKT BIT(9)
+
+#define UPPER_20_ENABLE (0x2 << 12)
+#define LOWER_20_ENABLE (0x4 << 12)
+#define FULL40M_ENABLE 0x6
+
+#define RSI_LMAC_CLOCK_80MHZ 0x1
+#define RSI_ENABLE_40MHZ (0x1 << 3)
+
+#define RX_BA_INDICATION 1
+#define RSI_TBL_SZ 40
+#define MAX_RETRIES 8
+
+#define STD_RATE_MCS7 0x07
+#define STD_RATE_MCS6 0x06
+#define STD_RATE_MCS5 0x05
+#define STD_RATE_MCS4 0x04
+#define STD_RATE_MCS3 0x03
+#define STD_RATE_MCS2 0x02
+#define STD_RATE_MCS1 0x01
+#define STD_RATE_MCS0 0x00
+#define STD_RATE_54 0x6c
+#define STD_RATE_48 0x60
+#define STD_RATE_36 0x48
+#define STD_RATE_24 0x30
+#define STD_RATE_18 0x24
+#define STD_RATE_12 0x18
+#define STD_RATE_11 0x16
+#define STD_RATE_09 0x12
+#define STD_RATE_06 0x0C
+#define STD_RATE_5_5 0x0B
+#define STD_RATE_02 0x04
+#define STD_RATE_01 0x02
+
+#define RSI_RF_TYPE 1
+#define RSI_RATE_00 0x00
+#define RSI_RATE_1 0x0
+#define RSI_RATE_2 0x2
+#define RSI_RATE_5_5 0x4
+#define RSI_RATE_11 0x6
+#define RSI_RATE_6 0x8b
+#define RSI_RATE_9 0x8f
+#define RSI_RATE_12 0x8a
+#define RSI_RATE_18 0x8e
+#define RSI_RATE_24 0x89
+#define RSI_RATE_36 0x8d
+#define RSI_RATE_48 0x88
+#define RSI_RATE_54 0x8c
+#define RSI_RATE_MCS0 0x100
+#define RSI_RATE_MCS1 0x101
+#define RSI_RATE_MCS2 0x102
+#define RSI_RATE_MCS3 0x103
+#define RSI_RATE_MCS4 0x104
+#define RSI_RATE_MCS5 0x105
+#define RSI_RATE_MCS6 0x106
+#define RSI_RATE_MCS7 0x107
+#define RSI_RATE_MCS7_SG 0x307
+
+#define BW_20MHZ 0
+#define BW_40MHZ 1
+
+#define RSI_SUPP_FILTERS (FIF_ALLMULTI | FIF_PROBE_REQ |\
+ FIF_BCN_PRBRESP_PROMISC)
+enum opmode {
+ STA_OPMODE = 1,
+ AP_OPMODE = 2
+};
+
+extern struct ieee80211_rate rsi_rates[12];
+extern const u16 rsi_mcsrates[8];
+
+enum sta_notify_events {
+ STA_CONNECTED = 0,
+ STA_DISCONNECTED,
+ STA_TX_ADDBA_DONE,
+ STA_TX_DELBA,
+ STA_RX_ADDBA_DONE,
+ STA_RX_DELBA
+};
+
+/* Send Frames Types */
+enum cmd_frame_type {
+ TX_DOT11_MGMT,
+ RESET_MAC_REQ,
+ RADIO_CAPABILITIES,
+ BB_PROG_VALUES_REQUEST,
+ RF_PROG_VALUES_REQUEST,
+ WAKEUP_SLEEP_REQUEST,
+ SCAN_REQUEST,
+ TSF_UPDATE,
+ PEER_NOTIFY,
+ BLOCK_UNBLOCK,
+ SET_KEY_REQ,
+ AUTO_RATE_IND,
+ BOOTUP_PARAMS_REQUEST,
+ VAP_CAPABILITIES,
+	EEPROM_READ_TYPE,
+	EEPROM_WRITE,
+	GPIO_PIN_CONFIG,
+ SET_RX_FILTER,
+ AMPDU_IND,
+ STATS_REQUEST_FRAME,
+ BB_BUF_PROG_VALUES_REQ,
+ BBP_PROG_IN_TA,
+ BG_SCAN_PARAMS,
+ BG_SCAN_PROBE_REQ,
+ CW_MODE_REQ,
+ PER_CMD_PKT
+};
+
+struct rsi_mac_frame {
+ __le16 desc_word[8];
+} __packed;
+
+struct rsi_boot_params {
+ __le16 desc_word[8];
+ struct bootup_params bootup_params;
+} __packed;
+
+struct rsi_peer_notify {
+ __le16 desc_word[8];
+ u8 mac_addr[6];
+ __le16 command;
+ __le16 mpdu_density;
+ __le16 reserved;
+ __le32 sta_flags;
+} __packed;
+
+struct rsi_vap_caps {
+ __le16 desc_word[8];
+ u8 mac_addr[6];
+ __le16 keep_alive_period;
+ u8 bssid[6];
+ __le16 reserved;
+ __le32 flags;
+ __le16 frag_threshold;
+ __le16 rts_threshold;
+ __le32 default_mgmt_rate;
+ __le32 default_ctrl_rate;
+ __le32 default_data_rate;
+ __le16 beacon_interval;
+ __le16 dtim_period;
+} __packed;
+
+struct rsi_set_key {
+ __le16 desc_word[8];
+ u8 key[4][32];
+ u8 tx_mic_key[8];
+ u8 rx_mic_key[8];
+} __packed;
+
+struct rsi_auto_rate {
+ __le16 desc_word[8];
+ __le16 failure_limit;
+ __le16 initial_boundary;
+ __le16 max_threshold_limt;
+ __le16 num_supported_rates;
+ __le16 aarf_rssi;
+ __le16 moderate_rate_inx;
+ __le16 collision_tolerance;
+ __le16 supported_rates[40];
+} __packed;
+
+struct qos_params {
+ __le16 cont_win_min_q;
+ __le16 cont_win_max_q;
+ __le16 aifsn_val_q;
+ __le16 txop_q;
+} __packed;
+
+struct rsi_radio_caps {
+ __le16 desc_word[8];
+ struct qos_params qos_params[MAX_HW_QUEUES];
+ u8 num_11n_rates;
+ u8 num_11ac_rates;
+ __le16 gcpd_per_rate[20];
+} __packed;
+
+static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
+{
+ return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
+}
+
+static inline u32 rsi_get_length(u8 *addr, u16 offset)
+{
+ return (le16_to_cpu(*(__le16 *)&addr[offset])) & 0x0fff;
+}
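+
+/*
+ * As the two helpers above show, the first 16-bit descriptor word packs the
+ * destination queue number in bits 14:12 and the frame length in bits 11:0.
+ */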
+
+static inline u8 rsi_get_extended_desc(u8 *addr, u16 offset)
+{
+ return le16_to_cpu(*((__le16 *)&addr[offset + 4])) & 0x00ff;
+}
+
+static inline u8 rsi_get_rssi(u8 *addr)
+{
+ return *(u8 *)(addr + FRAME_DESC_SZ);
+}
+
+static inline u8 rsi_get_channel(u8 *addr)
+{
+	return *(u8 *)(addr + 15);
+}
+
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode);
+int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
+ u16 ssn, u8 buf_size, u8 event);
+int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
+ u8 key_type, u8 key_id, u32 cipher);
+int rsi_set_channel(struct rsi_common *common, u16 chno);
+void rsi_inform_bss_status(struct rsi_common *common, u8 status,
+ const u8 *bssid, u8 qos_enable, u16 aid);
+void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
+int rsi_mac80211_attach(struct rsi_common *common);
+void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
+ int status);
+bool rsi_is_cipher_wep(struct rsi_common *common);
+void rsi_core_qos_processor(struct rsi_common *common);
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
new file mode 100644
index 000000000000..df4b5e20e05f
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -0,0 +1,129 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef __RSI_SDIO_INTF__
+#define __RSI_SDIO_INTF__
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio_ids.h>
+#include "rsi_main.h"
+
+enum sdio_interrupt_type {
+ BUFFER_FULL = 0x0,
+ BUFFER_AVAILABLE = 0x1,
+ FIRMWARE_ASSERT_IND = 0x3,
+ MSDU_PACKET_PENDING = 0x4,
+ UNKNOWN_INT = 0XE
+};
+
+/* Buffer status register related info */
+#define PKT_BUFF_SEMI_FULL 0
+#define PKT_BUFF_FULL 1
+#define PKT_MGMT_BUFF_FULL 2
+#define MSDU_PKT_PENDING 3
+/* Interrupt Bit Related Macros */
+#define PKT_BUFF_AVAILABLE 0
+#define FW_ASSERT_IND 2
+
+#define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
+#define RSI_FN1_INT_REGISTER 0xf9
+#define RSI_SD_REQUEST_MASTER 0x10000
+
+/* FOR SD CARD ONLY */
+#define SDIO_RX_NUM_BLOCKS_REG 0x000F1
+#define SDIO_FW_STATUS_REG 0x000F2
+#define SDIO_NXT_RD_DELAY2 0x000F5
+#define SDIO_MASTER_ACCESS_MSBYTE 0x000FA
+#define SDIO_MASTER_ACCESS_LSBYTE 0x000FB
+#define SDIO_READ_START_LVL 0x000FC
+#define SDIO_READ_FIFO_CTL 0x000FD
+#define SDIO_WRITE_FIFO_CTL 0x000FE
+#define SDIO_FUN1_INTR_CLR_REG 0x0008
+#define SDIO_REG_HIGH_SPEED 0x0013
+
+#define RSI_GET_SDIO_INTERRUPT_TYPE(_I, TYPE) \
+ { \
+ TYPE = \
+ (_I & (1 << PKT_BUFF_AVAILABLE)) ? \
+ BUFFER_AVAILABLE : \
+ (_I & (1 << MSDU_PKT_PENDING)) ? \
+ MSDU_PACKET_PENDING : \
+ (_I & (1 << FW_ASSERT_IND)) ? \
+ FIRMWARE_ASSERT_IND : UNKNOWN_INT; \
+ }
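+
+/*
+ * Note: the macro checks PKT_BUFF_AVAILABLE first, so when several interrupt
+ * bits are set the handler services them one at a time in that priority
+ * order, clearing each bit before re-evaluating.
+ */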
+
+/* common registers in SDIO function1 */
+#define TA_SOFT_RESET_REG 0x0004
+#define TA_TH0_PC_REG 0x0400
+#define TA_HOLD_THREAD_REG 0x0844
+#define TA_RELEASE_THREAD_REG 0x0848
+
+#define TA_SOFT_RST_CLR 0
+#define TA_SOFT_RST_SET BIT(0)
+#define TA_PC_ZERO 0
+#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_BASE_ADDR 0x2200
+#define MISC_CFG_BASE_ADDR 0x4150
+
+struct receive_info {
+ bool buffer_full;
+ bool semi_buffer_full;
+ bool mgmt_buffer_full;
+ u32 mgmt_buf_full_counter;
+ u32 buf_semi_full_counter;
+ u8 watch_bufferfull_count;
+ u32 sdio_intr_status_zero;
+ u32 sdio_int_counter;
+ u32 total_sdio_msdu_pending_intr;
+ u32 total_sdio_unknown_intr;
+ u32 buf_full_counter;
+ u32 buf_avilable_counter;
+};
+
+struct rsi_91x_sdiodev {
+ struct sdio_func *pfunction;
+ struct task_struct *in_sdio_litefi_irq;
+ struct receive_info rx_info;
+ u32 next_read_delay;
+ u32 sdio_high_speed_enable;
+ u8 sdio_clock_speed;
+ u32 cardcapability;
+ u8 prev_desc[16];
+ u32 tx_blk_size;
+ u8 write_fail;
+};
+
+void rsi_interrupt_handler(struct rsi_hw *adapter);
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
+int rsi_sdio_device_init(struct rsi_common *common);
+int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
+int rsi_sdio_write_register(struct rsi_hw *adapter, u8 function,
+ u32 addr, u8 *data);
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u32 count);
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit);
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter);
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
new file mode 100644
index 000000000000..ebea0c411ead
--- /dev/null
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -0,0 +1,68 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_USB_INTF__
+#define __RSI_USB_INTF__
+
+#include <linux/usb.h>
+#include "rsi_main.h"
+#include "rsi_common.h"
+
+#define USB_INTERNAL_REG_1 0x25000
+#define RSI_USB_READY_MAGIC_NUM 0xab
+#define FW_STATUS_REG 0x41050012
+
+#define USB_VENDOR_REGISTER_READ 0x15
+#define USB_VENDOR_REGISTER_WRITE 0x16
+#define RSI_USB_TX_HEAD_ROOM 128
+
+#define MAX_RX_URBS 1
+#define MAX_BULK_EP 8
+#define MGMT_EP 1
+#define DATA_EP 2
+
+struct rsi_91x_usbdev {
+ struct rsi_thread rx_thread;
+ u8 endpoint;
+ struct usb_device *usbdev;
+ struct usb_interface *pfunction;
+ struct urb *rx_usb_urb[MAX_RX_URBS];
+ u8 *tx_buffer;
+ __le16 bulkin_size;
+ u8 bulkin_endpoint_addr;
+ __le16 bulkout_size[MAX_BULK_EP];
+ u8 bulkout_endpoint_addr[MAX_BULK_EP];
+ u32 tx_blk_size;
+ u8 write_fail;
+};
+
+static inline int rsi_usb_check_queue_status(struct rsi_hw *adapter, u8 q_num)
+{
+ /* In USB, there isn't any need to check the queue status */
+ return QUEUE_NOT_FULL;
+}
+
+static inline int rsi_usb_event_timeout(struct rsi_hw *adapter)
+{
+ return EVENT_WAIT_FOREVER;
+}
+
+int rsi_usb_device_init(struct rsi_common *common);
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+ u8 *data, u32 count);
+void rsi_usb_rx_thread(struct rsi_common *common);
+#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 42a2e06512f2..a49c3d73ea2c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
if (unlikely(tout))
- rt2x00_warn(entry->queue->rt2x00dev,
- "TX status timeout for entry %d in queue %d\n",
- entry->entry_idx, entry->queue->qid);
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
return tout;
}
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(rt2x00queue_empty(queue))) {
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
+ rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
break;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 2e3d1645e68b..90fdb02b55e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -286,7 +286,7 @@ static ssize_t rt2x00debug_read_queue_dump(struct file *file,
if (retval)
return retval;
- status = min((size_t)skb->len, length);
+ status = min_t(size_t, skb->len, length);
if (copy_to_user(buf, skb->data, status)) {
status = -EFAULT;
goto exit;
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
index 30332175bcd8..1ce1d55f0010 100644
--- a/drivers/net/wireless/rtl818x/Kconfig
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -2,11 +2,11 @@
# RTL818X Wireless LAN device configuration
#
config RTL8180
- tristate "Realtek 8180/8185 PCI support"
+ tristate "Realtek 8180/8185/8187SE PCI support"
depends on MAC80211 && PCI
select EEPROM_93CX6
---help---
- This is a driver for RTL8180 and RTL8185 based cards.
+ This is a driver for RTL8180, RTL8185 and RTL8187SE based cards.
These are PCI based chips found in cards such as:
(RTL8185 802.11g)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index cb4fb8596f0b..08b056db4a3b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,4 +1,4 @@
-rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o
+rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl8180.o
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 3867d1470b36..98d8256f0377 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1,15 +1,42 @@
-/*
- * Linux device driver for RTL8180 / RTL8185
+/* Linux device driver for RTL8180 / RTL8185 / RTL8187SE
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ * Copyright 2007,2014 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
* Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
+ ************************************************************************
+ *
+ * The driver was extended to the RTL8187SE in 2014 by
+ * Andrea Merello <andrea.merello@gmail.com>
+ *
+ * based also on:
+ * - portions of rtl8187se Linux staging driver, Copyright Realtek corp.
+ * - other GPL, unpublished (until now), Linux driver code,
+ * Copyright Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * A huge thanks goes to Sara V. Nari who forgives me when I'm
+ * sitting in front of my laptop at evening, week-end, night...
+ *
+ * A special thanks goes to Antonio Cuni, who helped me with
+ * some python userspace stuff I used to debug RTL8187SE code, and who
+ * bought a laptop with an unsupported Wi-Fi card some years ago...
+ *
+ * Thanks to Larry Finger for writing some code for rtl8187se and for
+ * his suggestions.
+ *
+ * Thanks to Dan Carpenter for reviewing my initial patch and for his
+ * suggestions.
+ *
+ * Thanks to Bernhard Schiffner for his help in testing and for his
+ * suggestions.
+ *
+ ************************************************************************
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -29,13 +56,18 @@
#include "sa2400.h"
#include "max2820.h"
#include "grf5101.h"
+#include "rtl8225se.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
-MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
+MODULE_DESCRIPTION("RTL8180 / RTL8185 / RTL8187SE PCI wireless driver");
MODULE_LICENSE("GPL");
static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
+
+ /* rtl8187se */
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) },
+
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -85,6 +117,76 @@ static const struct ieee80211_channel rtl818x_channels[] = {
{ .center_freq = 2484 },
};
+/* Queues for rtl8187se card
+ *
+ * name | reg | queue
+ * BC | 7 | 6
+ * MG | 1 | 0
+ * HI | 6 | 1
+ * VO | 5 | 2
+ * VI | 4 | 3
+ * BE | 3 | 4
+ * BK | 2 | 5
+ *
+ * The complete map for the DMA kick reg using all queues is:
+ * static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] =
+ * {1, 6, 5, 4, 3, 2, 7};
+ *
+ * .. but.. because 4 queues are enough for mac80211 QoS, we use this instead:
+ *
+ * name | reg | queue
+ * BC | 7 | 4 <- currently not used yet
+ * MG | 1 | x <- Not used
+ * HI | 6 | x <- Not used
+ * VO | 5 | 0 <- used
+ * VI | 4 | 1 <- used
+ * BE | 3 | 2 <- used
+ * BK | 2 | 3 <- used
+ *
+ * Beacon queue could be used, but this is not finished yet.
+ *
+ * I thought about using the other two queues but I decided not to do this:
+ *
+ * - I'm unsure whether mac80211 will ever try to use more than 4 queues
+ * by itself.
+ *
+ * - I could route MGMT frames (currently sent over the VO queue) to the MGMT
+ * queue, but since mac80211 would not know about it, I would probably gain
+ * some HW priority whenever the VO queue is not empty. This gain is
+ * limited by the fact that I would have to stop the mac80211 queue whenever
+ * either the VO or the MGMT queue is full, which would also stop submission
+ * of MGMT frames to the driver.
+ *
+ * - I don't know how to set in the HW the contention window params for MGMT
+ * and HI-prio queues.
+ */
+
+static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] = {5, 4, 3, 2, 7};
+
+/* Queues for rtl8180/rtl8185 cards
+ *
+ * name | reg | prio
+ * BC | 7 | 3
+ * HI | 6 | 0
+ * NO | 5 | 1
+ * LO | 4 | 2
+ *
+ * The complete map for the DMA kick reg using all queues is:
+ * static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {6, 5, 4, 7};
+ *
+ * .. but .. because mac80211 needs at least 4 queues to do QoS at all,
+ * we use just one.
+ * The beacon queue could be used, but this is not finished yet.
+ * The actual map is:
+ *
+ * name | reg | prio
+ * BC | 7 | 1 <- currently not used yet.
+ * HI | 6 | x <- not used
+ * NO | 5 | x <- not used
+ * LO | 4 | 0 <- used
+ */
+
+static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {4, 7};
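The two maps above translate a mac80211 queue index (lower index = higher priority) into the bit that rtl8180_tx() later sets in the TX_DMA_POLLING register. A minimal user-space sketch of that translation, using the map values from the comments above (the standalone form and names are illustrative only, not kernel code):

#include <stdio.h>

/* queue-to-kick-bit maps copied from the comments above */
static const int map_8187se[5] = {5, 4, 3, 2, 7};	/* VO VI BE BK beacon */
static const int map_8180[2] = {4, 7};			/* LO beacon */

int main(void)
{
	int prio;

	for (prio = 0; prio < 5; prio++)
		printf("8187se: mac80211 q%d -> kick bit %d (reg 0x%02x)\n",
		       prio, map_8187se[prio], 1 << map_8187se[prio]);
	for (prio = 0; prio < 2; prio++)
		printf("8180:   mac80211 q%d -> kick bit %d (reg 0x%02x)\n",
		       prio, map_8180[prio], 1 << map_8180[prio]);
	return 0;
}

For the rtl8180/rtl8185 case the driver additionally keeps bits 1 and 2 set in the same write to hold the unused rings stopped, as rtl8180_tx() below shows.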
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
@@ -105,14 +207,30 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
static void rtl8180_handle_rx(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl818x_rx_cmd_desc *cmd_desc;
unsigned int count = 32;
u8 signal, agc, sq;
dma_addr_t mapping;
while (count--) {
- struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
+ void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz;
struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
- u32 flags = le32_to_cpu(entry->flags);
+ u32 flags, flags2;
+ u64 tsft;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ struct rtl8187se_rx_desc *desc = entry;
+
+ flags = le32_to_cpu(desc->flags);
+ flags2 = le32_to_cpu(desc->flags2);
+ tsft = le64_to_cpu(desc->tsft);
+ } else {
+ struct rtl8180_rx_desc *desc = entry;
+
+ flags = le32_to_cpu(desc->flags);
+ flags2 = le32_to_cpu(desc->flags2);
+ tsft = le64_to_cpu(desc->tsft);
+ }
if (flags & RTL818X_RX_DESC_FLAG_OWN)
return;
@@ -122,7 +240,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
RTL818X_RX_DESC_FLAG_RX_ERR)))
goto done;
else {
- u32 flags2 = le32_to_cpu(entry->flags2);
struct ieee80211_rx_status rx_status = {0};
struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE);
@@ -148,19 +265,24 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
rx_status.rate_idx = (flags >> 20) & 0xF;
agc = (flags2 >> 17) & 0x7F;
- if (priv->r8185) {
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
if (rx_status.rate_idx > 3)
signal = 90 - clamp_t(u8, agc, 25, 90);
else
signal = 95 - clamp_t(u8, agc, 30, 95);
- } else {
+ } else if (priv->chip_family ==
+ RTL818X_CHIP_FAMILY_RTL8180) {
sq = flags2 & 0xff;
signal = priv->rf->calc_rssi(agc, sq);
+ } else {
+ /* TODO: rtl8187se rssi */
+ signal = 10;
}
rx_status.signal = signal;
rx_status.freq = dev->conf.chandef.chan->center_freq;
rx_status.band = dev->conf.chandef.chan->band;
- rx_status.mactime = le64_to_cpu(entry->tsft);
+ rx_status.mactime = tsft;
rx_status.flag |= RX_FLAG_MACTIME_START;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -174,11 +296,13 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
}
done:
- entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
- entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
+ cmd_desc = entry;
+ cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
+ cmd_desc->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
MAX_RX_SIZE);
if (priv->rx_idx == 31)
- entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
+ cmd_desc->flags |=
+ cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
priv->rx_idx = (priv->rx_idx + 1) % 32;
}
}
@@ -218,6 +342,55 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
}
}
+static irqreturn_t rtl8187se_interrupt(int irq, void *dev_id)
+{
+ struct ieee80211_hw *dev = dev_id;
+ struct rtl8180_priv *priv = dev->priv;
+ u32 reg;
+ unsigned long flags;
+ static int desc_err;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Note: 32-bit interrupt status */
+ reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE);
+ if (unlikely(reg == 0xFFFFFFFF)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg);
+
+ if (reg & IMR_TIMEOUT1)
+ rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
+
+ if (reg & (IMR_TBDOK | IMR_TBDER))
+ rtl8180_handle_tx(dev, 4);
+
+ if (reg & (IMR_TVODOK | IMR_TVODER))
+ rtl8180_handle_tx(dev, 0);
+
+ if (reg & (IMR_TVIDOK | IMR_TVIDER))
+ rtl8180_handle_tx(dev, 1);
+
+ if (reg & (IMR_TBEDOK | IMR_TBEDER))
+ rtl8180_handle_tx(dev, 2);
+
+ if (reg & (IMR_TBKDOK | IMR_TBKDER))
+ rtl8180_handle_tx(dev, 3);
+
+ if (reg & (IMR_ROK | IMR_RER | RTL818X_INT_SE_RX_DU | IMR_RQOSOK))
+ rtl8180_handle_rx(dev);
+ /* The interface sometimes generates several RX DMA descriptor errors
+ * at startup. Do not report these.
+ */
+ if ((reg & RTL818X_INT_SE_RX_DU) && desc_err++ > 2)
+ if (net_ratelimit())
+ wiphy_err(dev->wiphy, "No RX DMA Descriptor avail\n");
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
@@ -234,12 +407,6 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR))
- rtl8180_handle_tx(dev, 3);
-
- if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
- rtl8180_handle_tx(dev, 2);
-
- if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
rtl8180_handle_tx(dev, 1);
if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
@@ -263,12 +430,14 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
unsigned long flags;
- unsigned int idx, prio;
+ unsigned int idx, prio, hw_prio;
dma_addr_t mapping;
u32 tx_flags;
u8 rc_flags;
u16 plcp_len = 0;
__le16 rts_duration = 0;
+ /* do arithmetic and then convert to le16 */
+ u16 frame_duration = 0;
prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
@@ -280,7 +449,6 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
kfree_skb(skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
return;
-
}
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
@@ -288,7 +456,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
skb->len;
- if (priv->r8185)
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
RTL818X_TX_DESC_FLAG_NO_ENC;
@@ -305,7 +473,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
info);
- if (!priv->r8185) {
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
unsigned int remainder;
plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
@@ -316,6 +484,18 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
plcp_len |= 1 << 15;
}
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ __le16 duration;
+ /* SIFS time (required by HW) is already included by
+ * ieee80211_generic_frame_duration
+ */
+ duration = ieee80211_generic_frame_duration(dev, priv->vif,
+ IEEE80211_BAND_2GHZ, skb->len,
+ ieee80211_get_tx_rate(dev, info));
+
+ frame_duration = priv->ack_time + le16_to_cpu(duration);
+ }
+
spin_lock_irqsave(&priv->lock, flags);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -328,21 +508,91 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ entry->frame_duration = cpu_to_le16(frame_duration);
+ entry->frame_len_se = cpu_to_le16(skb->len);
+
+ /* tpc polarity */
+ entry->flags3 = cpu_to_le16(1<<4);
+ } else
+ entry->frame_len = cpu_to_le32(skb->len);
+
entry->rts_duration = rts_duration;
entry->plcp_len = cpu_to_le16(plcp_len);
entry->tx_buf = cpu_to_le32(mapping);
- entry->frame_len = cpu_to_le32(skb->len);
+
entry->flags2 = info->control.rates[1].idx >= 0 ?
ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
entry->retry_limit = info->control.rates[0].count;
+
+ /* We must be sure that tx_flags is written last because the HW
+ * looks at it to check whether the rest of the data is valid or not.
+ */
+ wmb();
entry->flags = cpu_to_le32(tx_flags);
+ /* We must be sure this has been written before the following HW
+ * register write, because that write makes the HW attempt
+ * to DMA the just-written data.
+ */
+ wmb();
+
__skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2)
ieee80211_stop_queue(dev, prio);
spin_unlock_irqrestore(&priv->lock, flags);
- rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ /* just poll: rings are stopped with TPPollStop reg */
+ hw_prio = rtl8187se_queues_map[prio];
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1 << hw_prio));
+ } else {
+ hw_prio = rtl8180_queues_map[prio];
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1 << hw_prio) | /* ring to poll */
+ (1<<1) | (1<<2));/* stopped rings */
+ }
+}
+
+static void rtl8180_set_anaparam3(struct rtl8180_priv *priv, u16 anaparam3)
+{
+ u8 reg;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_CONFIG);
+
+ reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3);
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
+}
+
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2)
+{
+ u8 reg;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_CONFIG);
+
+ reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+ reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
}
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -359,17 +609,171 @@ void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
}
+static void rtl8187se_mac_config(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 reg;
+
+ rtl818x_iowrite32(priv, REG_ADDR4(0x1F0), 0);
+ rtl818x_ioread32(priv, REG_ADDR4(0x1F0));
+ rtl818x_iowrite32(priv, REG_ADDR4(0x1F4), 0);
+ rtl818x_ioread32(priv, REG_ADDR4(0x1F4));
+ rtl818x_iowrite8(priv, REG_ADDR1(0x1F8), 0);
+ rtl818x_ioread8(priv, REG_ADDR1(0x1F8));
+ /* Enable DA10 TX power saving */
+ reg = rtl818x_ioread8(priv, &priv->map->PHY_PR);
+ rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04);
+ /* Power */
+ rtl818x_iowrite16(priv, PI_DATA_REG, 0x1000);
+ rtl818x_iowrite16(priv, SI_DATA_REG, 0x1000);
+ /* AFE - default to power ON */
+ rtl818x_iowrite16(priv, REG_ADDR2(0x370), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x372), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x374), 0x0DA4);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x376), 0x0DA4);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x378), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37A), 0x0560);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37C), 0x00EC);
+ rtl818x_iowrite16(priv, REG_ADDR2(0x37E), 0x00EC);
+ rtl818x_iowrite8(priv, REG_ADDR1(0x24E), 0x01);
+ /* unknown, needed for suspend to RAM resume */
+ rtl818x_iowrite8(priv, REG_ADDR1(0x0A), 0x72);
+}
+
+static void rtl8187se_set_antenna_config(struct ieee80211_hw *dev, u8 def_ant,
+ bool diversity)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ rtl8225_write_phy_cck(dev, 0x0C, 0x09);
+ if (diversity) {
+ if (def_ant == 1) {
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+ rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+ rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+ } else { /* main antenna */
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+ rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+ }
+ } else { /* disable antenna diversity */
+ if (def_ant == 1) {
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+ rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+ rtl8225_write_phy_cck(dev, 0x01, 0x47);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+ } else { /* main antenna */
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+ rtl8225_write_phy_cck(dev, 0x01, 0x47);
+ rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+ rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+ }
+ }
+ /* priv->curr_ant = def_ant; */
+}
+
+static void rtl8180_int_enable(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
+ IMR_TBDER | IMR_THPDER |
+ IMR_THPDER | IMR_THPDOK |
+ IMR_TVODER | IMR_TVODOK |
+ IMR_TVIDER | IMR_TVIDOK |
+ IMR_TBEDER | IMR_TBEDOK |
+ IMR_TBKDER | IMR_TBKDOK |
+ IMR_RDU | IMR_RER |
+ IMR_ROK | IMR_RQOSOK);
+ } else {
+ rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+ }
+}
+
+static void rtl8180_int_disable(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->IMR, 0);
+ } else {
+ rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ }
+}
+
+static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
+ u32 rates_mask)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ u8 max, min;
+ u16 reg;
+
+ max = fls(rates_mask) - 1;
+ min = ffs(rates_mask) - 1;
+
+ switch (priv->chip_family) {
+
+ case RTL818X_CHIP_FAMILY_RTL8180:
+ /* in 8180 this is NOT a BITMAP */
+ reg = rtl818x_ioread16(priv, &priv->map->BRSR);
+ reg &= ~3;
+ reg |= max;
+ rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
+ break;
+
+ case RTL818X_CHIP_FAMILY_RTL8185:
+ /* in 8185 this is a BITMAP */
+ rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
+ rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
+ break;
+
+ case RTL818X_CHIP_FAMILY_RTL8187SE:
+ /* in 8187se this is a BITMAP */
+ rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
+ break;
+ }
+}
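A quick user-space check of the max/min computation above: for the rtl8185 bitmap case, rates_mask 0x1f3 yields max index 8 and min index 0, so RESP_RATE gets 0x80, matching the (8 << 4) | 0 value the old code wrote. The sketch uses a local stand-in for the kernel's fls(), which is not available in user space:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* user-space stand-in for the kernel's fls() */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int rates_mask = 0x1f3;	/* 1,2,6,9,12,18,24 Mbps */
	int max = fls_demo(rates_mask) - 1;
	int min = ffs(rates_mask) - 1;

	printf("max idx %d, min idx %d, RESP_RATE byte 0x%02x\n",
	       max, min, (max << 4) | min);
	return 0;
}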
+
+static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u16 reg16;
+ u8 reg8;
+
+ reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ reg8 |= 1 << 1;
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
+ } else {
+ reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
+ reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
+ rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
+ }
+
+}
+
static int rtl8180_init_hw(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
u16 reg;
+ u32 reg32;
rtl818x_iowrite8(priv, &priv->map->CMD, 0);
rtl818x_ioread8(priv, &priv->map->CMD);
msleep(10);
/* reset */
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ rtl8180_int_disable(dev);
rtl818x_ioread8(priv, &priv->map->CMD);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -390,31 +794,45 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
msleep(200);
if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) {
- /* For cardbus */
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- reg |= 1 << 1;
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
- reg = rtl818x_ioread16(priv, &priv->map->FEMR);
- reg |= (1 << 15) | (1 << 14) | (1 << 4);
- rtl818x_iowrite16(priv, &priv->map->FEMR, reg);
+ rtl8180_config_cardbus(dev);
}
- rtl818x_iowrite8(priv, &priv->map->MSR, 0);
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+ else
+ rtl818x_iowrite8(priv, &priv->map->MSR, 0);
- if (!priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
rtl8180_set_anaparam(priv, priv->anaparam);
rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
- rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
- rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
- rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
- rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
+ /* mac80211 queues have higher priority for lower indices. The last queue
+ * (which mac80211 is not aware of) is reserved for beacons (and has
+ * the highest priority on the NIC).
+ */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->TBDA,
+ priv->tx_ring[1].dma);
+ rtl818x_iowrite32(priv, &priv->map->TLPDA,
+ priv->tx_ring[0].dma);
+ } else {
+ rtl818x_iowrite32(priv, &priv->map->TBDA,
+ priv->tx_ring[4].dma);
+ rtl818x_iowrite32(priv, &priv->map->TVODA,
+ priv->tx_ring[0].dma);
+ rtl818x_iowrite32(priv, &priv->map->TVIDA,
+ priv->tx_ring[1].dma);
+ rtl818x_iowrite32(priv, &priv->map->TBEDA,
+ priv->tx_ring[2].dma);
+ rtl818x_iowrite32(priv, &priv->map->TBKDA,
+ priv->tx_ring[3].dma);
+ }
/* TODO: necessary? specs indicate not */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3));
- if (priv->r8185) {
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4));
}
@@ -426,13 +844,17 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
- if (priv->r8185) {
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
- rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
+ } else {
+ rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+ rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
+ rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+ }
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
/* TODO: set ClkRun enable? necessary? */
reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE);
rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6));
@@ -440,28 +862,90 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
- } else {
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x1);
- rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
+ }
- rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
- rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+
+ /* set the auto rate fallback bitmask from 1M to 54 Mb/s */
+ rtl818x_iowrite16(priv, ARFR, 0xFFF);
+ rtl818x_ioread16(priv, ARFR);
+
+ /* stop unused queues (no DMA alloc) */
+ rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP,
+ RTL818x_TPPOLL_STOP_MG | RTL818x_TPPOLL_STOP_HI);
+
+ rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00);
+ rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
+
+ rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
+
+ /* some black magic here.. */
+ rtl8187se_mac_config(dev);
+
+ rtl818x_iowrite16(priv, RFSW_CTRL, 0x569A);
+ rtl818x_ioread16(priv, RFSW_CTRL);
+
+ rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_ON);
+ rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_ON);
+ rtl8180_set_anaparam3(priv, RTL8225SE_ANAPARAM3);
+
+
+ rtl818x_iowrite8(priv, &priv->map->CONFIG5,
+ rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F);
+
+ /* this probably switches the LED on */
+ rtl818x_iowrite8(priv, &priv->map->PGSELECT,
+ rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08);
+
+ rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
+ rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF);
+ rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
+
+ rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003);
+
+ /* the reference code MAC hardcode table writes
+ * this reg by doing byte-wide accesses.
+ * It does so just for the lowest and highest byte.
+ */
+ reg32 = rtl818x_ioread32(priv, &priv->map->RF_PARA);
+ reg32 &= 0x00ffff00;
+ reg32 |= 0xb8000054;
+ rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
}
priv->rf->init(dev);
- if (priv->r8185)
- rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+
+ /* Default basic rates are 1,2 Mbps for rtl8180 and 1,2,6,9,12,18,24 Mbps
+ * otherwise (bitmasks 0x3 and 0x01f3 respectively).
+ * NOTE: currently the rtl8225 RF code changes basic rates, so we need to
+ * do this after RF init.
+ * TODO: try to find out whether the RF code really needs to do this.
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ rtl8180_conf_basic_rates(dev, 0x3);
+ else
+ rtl8180_conf_basic_rates(dev, 0x1f3);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl8187se_set_antenna_config(dev,
+ priv->antenna_diversity_default,
+ priv->antenna_diversity_en);
return 0;
}
static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
{
struct rtl8180_priv *priv = dev->priv;
- struct rtl8180_rx_desc *entry;
+ struct rtl818x_rx_cmd_desc *entry;
int i;
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ priv->rx_ring_sz = sizeof(struct rtl8187se_rx_desc);
+ else
+ priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
+
priv->rx_ring = pci_alloc_consistent(priv->pdev,
- sizeof(*priv->rx_ring) * 32,
+ priv->rx_ring_sz * 32,
&priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
@@ -469,20 +953,28 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
return -ENOMEM;
}
- memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32);
+ memset(priv->rx_ring, 0, priv->rx_ring_sz * 32);
priv->rx_idx = 0;
for (i = 0; i < 32; i++) {
struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
dma_addr_t *mapping;
- entry = &priv->rx_ring[i];
- if (!skb)
- return 0;
-
+ entry = priv->rx_ring + priv->rx_ring_sz*i;
+ if (!skb) {
+ wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
+ return -ENOMEM;
+ }
priv->rx_buf[i] = skb;
mapping = (dma_addr_t *)skb->cb;
*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ kfree_skb(skb);
+ wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
+ return -ENOMEM;
+ }
+
entry->rx_buf = cpu_to_le32(*mapping);
entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
MAX_RX_SIZE);
@@ -507,7 +999,7 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
kfree_skb(skb);
}
- pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * 32,
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
priv->rx_ring, priv->rx_ring_dma);
priv->rx_ring = NULL;
}
@@ -571,7 +1063,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (ret)
return ret;
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
if ((ret = rtl8180_init_tx_ring(dev, i, 16)))
goto err_free_rings;
@@ -579,23 +1071,28 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (ret)
goto err_free_rings;
- rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
- rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
- rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
- rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
- rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
-
- ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ ret = request_irq(priv->pdev->irq, rtl8187se_interrupt,
IRQF_SHARED, KBUILD_MODNAME, dev);
+ } else {
+ ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ }
+
if (ret) {
wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto err_free_rings;
}
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+ rtl8180_int_enable(dev);
- rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
- rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+ /* in rtl8187se, the MAR regs offset holds the management
+ * TX descriptor DMA address.
+ */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+ rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
+ rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+ }
reg = RTL818X_RX_CONF_ONLYERLPKT |
RTL818X_RX_CONF_RX_AUTORESETPHY |
@@ -605,27 +1102,42 @@ static int rtl8180_start(struct ieee80211_hw *dev)
RTL818X_RX_CONF_BROADCAST |
RTL818X_RX_CONF_NICMAC;
- if (priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185)
reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2;
- else {
+ else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1)
? RTL818X_RX_CONF_CSDM1 : 0;
reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2)
? RTL818X_RX_CONF_CSDM2 : 0;
+ } else {
+ reg &= ~(RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2);
}
priv->rx_conf = reg;
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
- if (priv->r8185) {
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+
+ /* CW is not set on a per-packet basis.
+ * In rtl8185 the CW_VALUE reg is used.
+ * In rtl8187se the AC param regs are used.
+ */
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ /* The retry limit IS set on a per-packet basis.
+ * The short and long retry limits in the TX_CONF
+ * reg are ignored.
+ */
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ /* TX antenna and TX gain are not set on a per-packet basis.
+ * The TX antenna is selected by the ANTSEL reg (RX in BB regs).
+ * TX gain is selected with the CCK_TX_AGC and OFDM_TX_AGC regs.
+ */
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -637,11 +1149,16 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= (6 << 21 /* MAX TX DMA */) |
RTL818X_TX_CONF_NO_ICV;
- if (priv->r8185)
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ reg |= 1<<30; /* "duration procedure mode" */
+
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
reg &= ~RTL818X_TX_CONF_PROBE_DTS;
else
reg &= ~RTL818X_TX_CONF_HW_SEQNUM;
+ reg &= ~RTL818X_TX_CONF_DISCW;
+
/* different meaning, same value on both rtl8185 and rtl8180 */
reg &= ~RTL818X_TX_CONF_SAT_HWPLCP;
@@ -656,7 +1173,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
err_free_rings:
rtl8180_free_rx_ring(dev);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
if (priv->tx_ring[i].desc)
rtl8180_free_tx_ring(dev, i);
@@ -669,7 +1186,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+ rtl8180_int_disable(dev);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
reg &= ~RTL818X_CMD_TX_ENABLE;
@@ -686,7 +1203,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
free_irq(priv->pdev->irq, dev);
rtl8180_free_rx_ring(dev);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < (dev->queues + 1); i++)
rtl8180_free_tx_ring(dev, i);
}
@@ -794,6 +1311,123 @@ static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
return 0;
}
+static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
+{
+ const struct ieee80211_tx_queue_params *params;
+ struct rtl8180_priv *priv = dev->priv;
+
+ /* hw value */
+ u32 ac_param;
+
+ u8 aifs;
+ u8 txop;
+ u8 cw_min, cw_max;
+
+ params = &priv->queue_param[queue];
+
+ cw_min = fls(params->cw_min);
+ cw_max = fls(params->cw_max);
+
+ aifs = 10 + params->aifs * priv->slot_time;
+
+ /* TODO: check whether the HW txop is in us (mult by 32) */
+ txop = params->txop;
+
+ ac_param = txop << AC_PARAM_TXOP_LIMIT_SHIFT |
+ cw_max << AC_PARAM_ECW_MAX_SHIFT |
+ cw_min << AC_PARAM_ECW_MIN_SHIFT |
+ aifs << AC_PARAM_AIFS_SHIFT;
+
+ switch (queue) {
+ case IEEE80211_AC_BK:
+ rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_BE:
+ rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_VI:
+ rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param);
+ break;
+ case IEEE80211_AC_VO:
+ rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param);
+ break;
+ }
+}
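The ac_param word above packs TXOP, ECWmax, ECWmin and AIFS into one 32-bit register value using the AC_PARAM_*_SHIFT constants from the rtl8187se header, which is not part of this hunk. A user-space sketch of the packing; the shift values below are assumptions chosen for illustration only, not the driver's real definitions:

#include <stdio.h>
#include <stdint.h>

/* assumed shifts, for illustration only */
#define DEMO_TXOP_SHIFT		16
#define DEMO_ECW_MAX_SHIFT	12
#define DEMO_ECW_MIN_SHIFT	8
#define DEMO_AIFS_SHIFT		0

int main(void)
{
	uint8_t txop = 0, cw_min = 4, cw_max = 10;	/* ECW exponents (fls of CW) */
	uint8_t slot_time = 9, aifsn = 2;
	uint8_t aifs = 10 + aifsn * slot_time;		/* same formula as the driver */
	uint32_t ac_param;

	ac_param = (uint32_t)txop << DEMO_TXOP_SHIFT |
		   (uint32_t)cw_max << DEMO_ECW_MAX_SHIFT |
		   (uint32_t)cw_min << DEMO_ECW_MIN_SHIFT |
		   (uint32_t)aifs << DEMO_AIFS_SHIFT;

	printf("ac_param = 0x%08lx\n", (unsigned long)ac_param);
	return 0;
}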
+
+static int rtl8180_conf_tx(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 cw_min, cw_max;
+
+ /* nothing to do ? */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ return 0;
+
+ cw_min = fls(params->cw_min);
+ cw_max = fls(params->cw_max);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ priv->queue_param[queue] = *params;
+ rtl8187se_conf_ac_parm(dev, queue);
+ } else
+ rtl818x_iowrite8(priv, &priv->map->CW_VAL,
+ (cw_max << 4) | cw_min);
+ return 0;
+}
+
+static void rtl8180_conf_erp(struct ieee80211_hw *dev,
+ struct ieee80211_bss_conf *info)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 sifs, difs;
+ int eifs;
+ u8 hw_eifs;
+
+ /* TODO: should we do something ? */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+ return;
+
+ /* I _hope_ this means 10uS for the HW.
+ * In the reference code it is 0x22 for
+ * both rtl8187L and rtl8187SE
+ */
+ sifs = 0x22;
+
+ if (info->use_short_slot)
+ priv->slot_time = 9;
+ else
+ priv->slot_time = 20;
+
+ /* 10 is SIFS time in uS */
+ difs = 10 + 2 * priv->slot_time;
+ eifs = 10 + difs + priv->ack_time;
+
+ /* HW should use 4uS units for EIFS (I'm sure for rtl8185) */
+ hw_eifs = DIV_ROUND_UP(eifs, 4);
+
+
+ rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
+ rtl818x_iowrite8(priv, &priv->map->SIFS, sifs);
+ rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
+
+ /* from reference code. set ack timeout reg = eifs reg */
+ rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs);
+ else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
+ /* rtl8187/rtl8185 HW bug. After EIFS has elapsed,
+ * the HW still waits for DIFS.
+ * HW uses 4uS units for EIFS.
+ */
+ hw_eifs = DIV_ROUND_UP(eifs - difs, 4);
+
+ rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs);
+ }
+}
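A worked example of the timing arithmetic above, with the short slot (9 us): difs = 10 + 2 * 9 = 28 us and eifs = 10 + difs + ack_time; the 8187se EIFS register then gets eifs in 4 us units, while the 8185 subtracts difs first because of the HW bug noted in the comment. The ack_time below is an illustrative placeholder, since the driver derives it from ieee80211_generic_frame_duration():

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int slot_time = 9;		/* short slot */
	int ack_time = 304;		/* illustrative value only */
	int difs = 10 + 2 * slot_time;	/* 28 us */
	int eifs = 10 + difs + ack_time;

	printf("difs = %d us, eifs = %d us, hw_eifs = %d (4 us units)\n",
	       difs, eifs, DIV_ROUND_UP(eifs, 4));
	return 0;
}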
+
static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -818,11 +1452,40 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
reg = RTL818X_MSR_INFRA;
} else
reg = RTL818X_MSR_NO_LINK;
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ reg |= RTL818X_MSR_ENEDCA;
+
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
}
- if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
- priv->rf->conf_erp(dev, info);
+ if (changed & BSS_CHANGED_BASIC_RATES)
+ rtl8180_conf_basic_rates(dev, info->basic_rates);
+
+ if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE)) {
+
+ /* When the preamble changes, the ACK duration changes, and ERP must
+ * be recalculated. ACK time is calculated at the lowest rate.
+ * Since mac80211 includes the SIFS time, we remove it (-10).
+ */
+ priv->ack_time =
+ le16_to_cpu(ieee80211_generic_frame_duration(dev,
+ priv->vif,
+ IEEE80211_BAND_2GHZ, 10,
+ &priv->rates[0])) - 10;
+
+ rtl8180_conf_erp(dev, info);
+
+ /* mac80211 supplies aifs_n to the driver and calls the
+ * conf_tx callback when aifs_n changes, NOT
+ * when aifs changes.
+ * Aifs should be recalculated if the slot time changes.
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ for (i = 0; i < 4; i++)
+ rtl8187se_conf_ac_parm(dev, i);
+ }
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED)
vif_priv->enable_beacon = info->enable_beacon;
@@ -880,6 +1543,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.remove_interface = rtl8180_remove_interface,
.config = rtl8180_config,
.bss_info_changed = rtl8180_bss_info_changed,
+ .conf_tx = rtl8180_conf_tx,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
.get_tsf = rtl8180_get_tsf,
@@ -887,8 +1551,7 @@ static const struct ieee80211_ops rtl8180_ops = {
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
- struct ieee80211_hw *dev = eeprom->data;
- struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_priv *priv = eeprom->data;
u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
@@ -899,8 +1562,7 @@ static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
- struct ieee80211_hw *dev = eeprom->data;
- struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_priv *priv = eeprom->data;
u8 reg = 2 << 6;
if (eeprom->reg_data_in)
@@ -917,6 +1579,83 @@ static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
udelay(10);
}
+static void rtl8180_eeprom_read(struct rtl8180_priv *priv)
+{
+ struct eeprom_93cx6 eeprom;
+ int eeprom_cck_table_adr;
+ u16 eeprom_val;
+ int i;
+
+ eeprom.data = priv;
+ eeprom.register_read = rtl8180_eeprom_register_read;
+ eeprom.register_write = rtl8180_eeprom_register_write;
+ if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
+ eeprom.width = PCI_EEPROM_WIDTH_93C66;
+ else
+ eeprom.width = PCI_EEPROM_WIDTH_93C46;
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_PROGRAM);
+ rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
+ udelay(10);
+
+ eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
+ eeprom_val &= 0xFF;
+ priv->rf_type = eeprom_val;
+
+ eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
+ priv->csthreshold = eeprom_val >> 8;
+
+ eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)priv->mac_addr, 3);
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ eeprom_cck_table_adr = 0x30;
+ else
+ eeprom_cck_table_adr = 0x10;
+
+ /* CCK TX power */
+ for (i = 0; i < 14; i += 2) {
+ u16 txpwr;
+ eeprom_93cx6_read(&eeprom, eeprom_cck_table_adr + (i >> 1),
+ &txpwr);
+ priv->channels[i].hw_value = txpwr & 0xFF;
+ priv->channels[i + 1].hw_value = txpwr >> 8;
+ }
+
+ /* OFDM TX power */
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
+ for (i = 0; i < 14; i += 2) {
+ u16 txpwr;
+ eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
+ priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
+ priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
+ }
+ }
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
+ __le32 anaparam;
+ eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
+ priv->anaparam = le32_to_cpu(anaparam);
+ eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
+ }
+
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+ eeprom_93cx6_read(&eeprom, 0x3F, &eeprom_val);
+ priv->antenna_diversity_en = !!(eeprom_val & 0x100);
+ priv->antenna_diversity_default = (eeprom_val & 0xC00) == 0x400;
+
+ eeprom_93cx6_read(&eeprom, 0x7C, &eeprom_val);
+ priv->xtal_out = eeprom_val & 0xF;
+ priv->xtal_in = (eeprom_val & 0xF0) >> 4;
+ priv->xtal_cal = !!(eeprom_val & 0x1000);
+ priv->thermal_meter_val = (eeprom_val & 0xF00) >> 8;
+ priv->thermal_meter_en = !!(eeprom_val & 0x2000);
+ }
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
+}
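The EEPROM TX power tables read above pack two channels per 16-bit word: the CCK power ends up in the low byte of each channel's hw_value and the OFDM power is OR'd into the high byte. A small user-space sketch of that unpacking, with made-up EEPROM words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical EEPROM words covering channels 1 and 2 */
	uint16_t cck_word = 0x1f1a;	/* ch1 CCK = 0x1a, ch2 CCK = 0x1f */
	uint16_t ofdm_word = 0x2422;	/* ch1 OFDM = 0x22, ch2 OFDM = 0x24 */
	uint16_t hw_value[2];

	hw_value[0] = cck_word & 0xFF;
	hw_value[1] = cck_word >> 8;
	hw_value[0] |= (ofdm_word & 0xFF) << 8;
	hw_value[1] |= ofdm_word & 0xFF00;

	/* low byte = CCK power, high byte = OFDM power */
	printf("ch1 hw_value = 0x%04x, ch2 hw_value = 0x%04x\n",
	       hw_value[0], hw_value[1]);
	return 0;
}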
+
static int rtl8180_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -924,12 +1663,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
struct rtl8180_priv *priv;
unsigned long mem_addr, mem_len;
unsigned int io_addr, io_len;
- int err, i;
- struct eeprom_93cx6 eeprom;
+ int err;
const char *chip_name, *rf_name = NULL;
u32 reg;
- u16 eeprom_val;
- u8 mac_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1011,7 +1747,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
dev->vif_data_size = sizeof(struct rtl8180_vif);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
- dev->queues = 1;
dev->max_signal = 65;
reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
@@ -1019,43 +1754,55 @@ static int rtl8180_probe(struct pci_dev *pdev,
switch (reg) {
case RTL818X_TX_CONF_R8180_ABCD:
chip_name = "RTL8180";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
break;
+
case RTL818X_TX_CONF_R8180_F:
chip_name = "RTL8180vF";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
break;
+
case RTL818X_TX_CONF_R8185_ABC:
chip_name = "RTL8185";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
break;
+
case RTL818X_TX_CONF_R8185_D:
chip_name = "RTL8185vD";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
+ break;
+
+ case RTL818X_TX_CONF_RTL8187SE:
+ chip_name = "RTL8187SE";
+ priv->chip_family = RTL818X_CHIP_FAMILY_RTL8187SE;
break;
+
default:
printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
pci_name(pdev), reg >> 25);
goto err_iounmap;
}
- priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC;
- if (priv->r8185) {
+ /* We declare to mac80211 all the queues except the beacon queue,
+ * which will eventually be handled by the driver.
+ * TX rings are arranged so that a lower IDX means a higher
+ * priority, in order to achieve a direct mapping with mac80211;
+ * however, the beacon queue is an exception and is mapped
+ * on the highest TX ring IDX.
+ */
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ dev->queues = RTL8187SE_NR_TX_QUEUES - 1;
+ else
+ dev->queues = RTL8180_NR_TX_QUEUES - 1;
+
+ if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
pci_try_set_mwi(pdev);
}
- eeprom.data = dev;
- eeprom.register_read = rtl8180_eeprom_register_read;
- eeprom.register_write = rtl8180_eeprom_register_write;
- if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
- eeprom.width = PCI_EEPROM_WIDTH_93C66;
- else
- eeprom.width = PCI_EEPROM_WIDTH_93C46;
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM);
- rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
- udelay(10);
+ rtl8180_eeprom_read(priv);
- eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
- eeprom_val &= 0xFF;
- switch (eeprom_val) {
+ switch (priv->rf_type) {
case 1: rf_name = "Intersil";
break;
case 2: rf_name = "RFMD";
@@ -1066,14 +1813,18 @@ static int rtl8180_probe(struct pci_dev *pdev,
break;
case 5: priv->rf = &grf5101_rf_ops;
break;
- case 9: priv->rf = rtl8180_detect_rf(dev);
+ case 9:
+ if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+ priv->rf = rtl8187se_detect_rf(dev);
+ else
+ priv->rf = rtl8180_detect_rf(dev);
break;
case 10:
rf_name = "RTL8255";
break;
default:
printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
- pci_name(pdev), eeprom_val);
+ pci_name(pdev), priv->rf_type);
goto err_iounmap;
}
@@ -1083,42 +1834,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
goto err_iounmap;
}
- eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
- priv->csthreshold = eeprom_val >> 8;
- if (!priv->r8185) {
- __le32 anaparam;
- eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
- priv->anaparam = le32_to_cpu(anaparam);
- eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
- }
-
- eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
- if (!is_valid_ether_addr(mac_addr)) {
+ if (!is_valid_ether_addr(priv->mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- eth_random_addr(mac_addr);
- }
- SET_IEEE80211_PERM_ADDR(dev, mac_addr);
-
- /* CCK TX power */
- for (i = 0; i < 14; i += 2) {
- u16 txpwr;
- eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
- priv->channels[i].hw_value = txpwr & 0xFF;
- priv->channels[i + 1].hw_value = txpwr >> 8;
- }
-
- /* OFDM TX power */
- if (priv->r8185) {
- for (i = 0; i < 14; i += 2) {
- u16 txpwr;
- eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
- priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
- priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
- }
+ eth_random_addr(priv->mac_addr);
}
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);
spin_lock_init(&priv->lock);
@@ -1130,12 +1851,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
}
wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n",
- mac_addr, chip_name, priv->rf->name);
+ priv->mac_addr, chip_name, priv->rf->name);
return 0;
err_iounmap:
- iounmap(priv->map);
+ pci_iounmap(pdev, priv->map);
err_free_dev:
ieee80211_free_hw(dev);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
index 30523314da43..291a55970d1a 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
@@ -24,27 +24,64 @@
#define ANAPARAM_PWR1_SHIFT 20
#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT)
+/* rtl8180/rtl8185 have 3 queues + a beacon queue.
+ * mac80211 can use just one, + beacon = 2 total.
+ */
+#define RTL8180_NR_TX_QUEUES 2
+
+/* rtl8187SE has 6 queues + a beacon queue.
+ * mac80211 can use 4 QoS data queues, + beacon = 5 total.
+ */
+#define RTL8187SE_NR_TX_QUEUES 5
+
+/* for static array allocation, it is the max of the above */
+#define RTL818X_NR_TX_QUEUES 5
+
struct rtl8180_tx_desc {
__le32 flags;
__le16 rts_duration;
__le16 plcp_len;
__le32 tx_buf;
- __le32 frame_len;
+ union{
+ __le32 frame_len;
+ struct {
+ __le16 frame_len_se;
+ __le16 frame_duration;
+ } __packed;
+ } __packed;
__le32 next_tx_desc;
u8 cw;
u8 retry_limit;
u8 agc;
u8 flags2;
- u32 reserved[2];
+ /* rsvd for 8180/8185.
+ * valid for 8187se but we don't use it
+ */
+ u32 reserved;
+ /* all rsvd for 8180/8185 */
+ __le16 flags3;
+ __le16 frag_qsize;
+} __packed;
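The union above lets the same 4 bytes hold either the single 32-bit frame_len used by rtl8180/rtl8185 or the 16-bit length plus 16-bit duration pair used by rtl8187se. A user-space sketch of the overlay (endianness handling omitted; the real descriptor fields are little-endian):

#include <stdio.h>
#include <stdint.h>

union frame_len_demo {
	uint32_t frame_len;		/* rtl8180/rtl8185 view */
	struct {
		uint16_t frame_len_se;	/* rtl8187se view */
		uint16_t frame_duration;
	};
};

int main(void)
{
	union frame_len_demo d;

	d.frame_len_se = 1500;		/* bytes */
	d.frame_duration = 223;		/* us, arbitrary example */
	printf("raw 32-bit view: 0x%08lx\n", (unsigned long)d.frame_len);
	return 0;
}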
+
+struct rtl818x_rx_cmd_desc {
+ __le32 flags;
+ u32 reserved;
+ __le32 rx_buf;
} __packed;
struct rtl8180_rx_desc {
__le32 flags;
__le32 flags2;
- union {
- __le32 rx_buf;
- __le64 tsft;
- };
+ __le64 tsft;
+
+} __packed;
+
+struct rtl8187se_rx_desc {
+ __le32 flags;
+ __le64 tsft;
+ __le32 flags2;
+ __le32 flags3;
+ u32 reserved[3];
} __packed;
struct rtl8180_tx_ring {
@@ -71,28 +108,45 @@ struct rtl8180_priv {
/* rtl8180 driver specific */
spinlock_t lock;
- struct rtl8180_rx_desc *rx_ring;
+ void *rx_ring;
+ u8 rx_ring_sz;
dma_addr_t rx_ring_dma;
unsigned int rx_idx;
struct sk_buff *rx_buf[32];
- struct rtl8180_tx_ring tx_ring[4];
+ struct rtl8180_tx_ring tx_ring[RTL818X_NR_TX_QUEUES];
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
struct ieee80211_supported_band band;
+ struct ieee80211_tx_queue_params queue_param[4];
struct pci_dev *pdev;
u32 rx_conf;
-
- int r8185;
+ u8 slot_time;
+ u16 ack_time;
+
+ enum {
+ RTL818X_CHIP_FAMILY_RTL8180,
+ RTL818X_CHIP_FAMILY_RTL8185,
+ RTL818X_CHIP_FAMILY_RTL8187SE,
+ } chip_family;
u32 anaparam;
u16 rfparam;
u8 csthreshold;
-
+ u8 mac_addr[ETH_ALEN];
+ u8 rf_type;
+ u8 xtal_out;
+ u8 xtal_in;
+ u8 xtal_cal;
+ u8 thermal_meter_val;
+ u8 thermal_meter_en;
+ u8 antenna_diversity_en;
+ u8 antenna_diversity_default;
/* sequence # */
u16 seqno;
};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
{
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index d60a5f399022..9bda5bc78eda 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -282,6 +282,7 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
msleep(1); /* FIXME: optional? */
+ /* TODO: use the set_anaparam2 function from dev.c */
/* anaparam2 on */
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
@@ -730,32 +731,11 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
msleep(10);
}
-static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev,
- struct ieee80211_bss_conf *info)
-{
- struct rtl8180_priv *priv = dev->priv;
-
- if (info->use_short_slot) {
- rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
- rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
- rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
- rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
- rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
- } else {
- rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
- rtl818x_iowrite8(priv, &priv->map->SIFS, 0x44);
- rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
- rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
- rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
- }
-}
-
static const struct rtl818x_rf_ops rtl8225_ops = {
.name = "rtl8225",
.init = rtl8225_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel,
- .conf_erp = rtl8225_rf_conf_erp,
};
static const struct rtl818x_rf_ops rtl8225z2_ops = {
@@ -763,7 +743,6 @@ static const struct rtl818x_rf_ops rtl8225z2_ops = {
.init = rtl8225z2_rf_init,
.stop = rtl8225_rf_stop,
.set_chan = rtl8225_rf_set_channel,
- .conf_erp = rtl8225_rf_conf_erp,
};
const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
new file mode 100644
index 000000000000..fde89866fa8d
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
@@ -0,0 +1,475 @@
+
+/* Radio tuning for RTL8225 on RTL8187SE
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+
+#include "rtl8180.h"
+#include "rtl8225se.h"
+
+#define PFX "rtl8225 (se) "
+
+static const u32 RF_GAIN_TABLE[] = {
+ 0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6,
+ 0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057,
+ 0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3,
+ 0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3,
+ 0x0183, 0x0163, 0x0143, 0x0123, 0x0103
+};
+
+static const u8 cck_ofdm_gain_settings[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
+ 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+};
+
+static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
+ 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
+};
+
+static const u8 rtl8225se_tx_power_cck[] = {
+ 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
+ 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
+ 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
+ 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
+ 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
+ 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
+};
+
+static const u8 rtl8225se_tx_power_cck_ch14[] = {
+ 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
+ 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
+ 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225se_tx_power_ofdm[] = {
+ 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
+};
+
+static const u32 rtl8225se_chan[] = {
+ 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
+ 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
+};
+
+static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
+ 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225sez2_tx_power_cck_B[] = {
+ 0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck_A[] = {
+ 0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck[] = {
+ 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
+};
+
+static const u8 ZEBRA_AGC[] = {
+ 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
+ 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
+ 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
+ 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62,
+ 0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27,
+ 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
+ 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16,
+ 0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b,
+ 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
+ 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21,
+ 0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24,
+ 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
+ 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
+};
+
+static const u8 OFDM_CONFIG[] = {
+ 0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50,
+ 0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00,
+ 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xA8, 0x26,
+ 0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB,
+ 0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00,
+ 0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00,
+ 0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e,
+ 0xD8, 0x3C, 0x7B, 0x10, 0x10
+};
+
+static void rtl8187se_three_wire_io(struct ieee80211_hw *dev, u8 *data,
+ u8 len, bool write)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ int i;
+ u8 tmp;
+
+ do {
+ for (i = 0; i < 5; i++) {
+ tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+ if (!(tmp & 0x3))
+ break;
+ udelay(10);
+ }
+ if (i == 5)
+ wiphy_err(dev->wiphy, PFX
+ "CmdReg: 0x%x RE/WE bits aren't clear\n", tmp);
+
+ tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02;
+ rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp);
+
+ tmp = rtl818x_ioread8(priv, REG_ADDR1(0x84)) & 0xF7;
+ rtl818x_iowrite8(priv, REG_ADDR1(0x84), tmp);
+ if (write) {
+ if (len == 16) {
+ rtl818x_iowrite16(priv, SW_3W_DB0,
+ *(u16 *)data);
+ } else if (len == 64) {
+ rtl818x_iowrite32(priv, SW_3W_DB0_4,
+ *((u32 *)data));
+ rtl818x_iowrite32(priv, SW_3W_DB1_4,
+ *((u32 *)(data + 4)));
+ } else
+ wiphy_err(dev->wiphy, PFX
+ "Unimplemented length\n");
+ } else {
+ rtl818x_iowrite16(priv, SW_3W_DB0, *(u16 *)data);
+ }
+ if (write)
+ tmp = 2;
+ else
+ tmp = 1;
+ rtl818x_iowrite8(priv, SW_3W_CMD1, tmp);
+ for (i = 0; i < 5; i++) {
+ tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+ if (!(tmp & 0x3))
+ break;
+ udelay(10);
+ }
+ rtl818x_iowrite8(priv, SW_3W_CMD1, 0);
+ if (!write) {
+ *((u16 *)data) = rtl818x_ioread16(priv, SI_DATA_REG);
+ *((u16 *)data) &= 0x0FFF;
+ }
+ } while (0);
+}
+
+static u32 rtl8187se_rf_readreg(struct ieee80211_hw *dev, u8 addr)
+{
+ u32 dataread = addr & 0x0F;
+ rtl8187se_three_wire_io(dev, (u8 *)&dataread, 16, 0);
+ return dataread;
+}
+
+static void rtl8187se_rf_writereg(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+ u32 outdata = (data << 4) | (u32)(addr & 0x0F);
+ rtl8187se_three_wire_io(dev, (u8 *)&outdata, 16, 1);
+}
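+
For reference, the two wrappers above encode a 16-bit three-wire word: the 4-bit RF register address occupies the low nibble, the data sits in the bits above it, and a read keeps only the low 12 bits returned in SI_DATA_REG. A minimal standalone sketch of that packing (the helper names are illustrative, not part of the driver):

#include <stdint.h>

/* Pack a 4-bit RF register address and its data into the 16-bit
 * three-wire word, mirroring rtl8187se_rf_writereg(): the address sits
 * in the low nibble, the data in bits 4..15. */
static uint16_t rf_3w_pack(uint8_t addr, uint16_t data)
{
	return (uint16_t)((data << 4) | (addr & 0x0F));
}

/* Mirror the read path: the device hands the value back in the low
 * 12 bits of SI_DATA_REG. */
static uint16_t rf_3w_unpack(uint16_t word)
{
	return word & 0x0FFF;
}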
+
+
+static void rtl8225se_write_zebra_agc(struct ieee80211_hw *dev)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ rtl8225se_write_phy_ofdm(dev, 0xF, ZEBRA_AGC[i]);
+ rtl8225se_write_phy_ofdm(dev, 0xE, i+0x80);
+ rtl8225se_write_phy_ofdm(dev, 0xE, 0);
+ }
+}
+
+static void rtl8187se_write_ofdm_config(struct ieee80211_hw *dev)
+{
+ /* write OFDM_CONFIG table */
+ int i;
+
+ for (i = 0; i < 60; i++)
+ rtl8225se_write_phy_ofdm(dev, i, OFDM_CONFIG[i]);
+
+}
+
+static void rtl8225sez2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u8 cck_power, ofdm_power;
+
+ cck_power = priv->channels[channel - 1].hw_value & 0xFF;
+ if (cck_power > 35)
+ cck_power = 35;
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+ cck_ofdm_gain_settings[cck_power]);
+
+ usleep_range(1000, 5000);
+ ofdm_power = priv->channels[channel - 1].hw_value >> 8;
+ if (ofdm_power > 35)
+ ofdm_power = 35;
+
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
+ cck_ofdm_gain_settings[ofdm_power]);
+ if (ofdm_power < 12) {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x5C);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x5C);
+ }
+ if (ofdm_power < 18) {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x54);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x54);
+ } else {
+ rtl8225se_write_phy_ofdm(dev, 7, 0x50);
+ rtl8225se_write_phy_ofdm(dev, 9, 0x50);
+ }
+
+ usleep_range(1000, 5000);
+}
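+
rtl8225sez2_rf_set_tx_power() above treats each channel's hw_value as a packed pair: the CCK gain index in the low byte and the OFDM gain index in the high byte, each clamped to 35 before indexing cck_ofdm_gain_settings[]. A hedged standalone sketch of that decoding (the struct and function names are illustrative, not driver API):

#include <stdint.h>

struct tx_power {	/* illustrative only; not a driver structure */
	uint8_t cck_idx;
	uint8_t ofdm_idx;
};

static struct tx_power decode_hw_value(uint16_t hw_value)
{
	struct tx_power p;

	p.cck_idx = hw_value & 0xFF;	/* low byte: CCK gain index */
	p.ofdm_idx = hw_value >> 8;	/* high byte: OFDM gain index */
	if (p.cck_idx > 35)
		p.cck_idx = 35;
	if (p.ofdm_idx > 35)
		p.ofdm_idx = 35;
	return p;
}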
+
+static void rtl8187se_write_rf_gain(struct ieee80211_hw *dev)
+{
+ int i;
+
+ for (i = 0; i <= 36; i++) {
+ rtl8187se_rf_writereg(dev, 0x01, i); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, RF_GAIN_TABLE[i]); mdelay(1);
+ }
+}
+
+static void rtl8187se_write_initial_gain(struct ieee80211_hw *dev,
+ int init_gain)
+{
+ switch (init_gain) {
+ default:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+ break;
+ case 2:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+ break;
+ case 3:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 4:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 5:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+ break;
+ case 6:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ case 7:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0xA6); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ case 8:
+ rtl8225se_write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x24, 0xB6); mdelay(1);
+ rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+ break;
+ }
+}
+
+void rtl8225se_rf_init(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+ u32 rf23, rf24;
+ u8 d_cut = 0;
+ u8 tmp;
+
+ /* Page 1 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+ rf23 = rtl8187se_rf_readreg(dev, 0x08); mdelay(1);
+ rf24 = rtl8187se_rf_readreg(dev, 0x09); mdelay(1);
+ if (rf23 == 0x0818 && rf24 == 0x070C)
+ d_cut = 1;
+
+ wiphy_info(dev->wiphy, "RTL8225-SE version %s\n",
+ d_cut ? "D" : "not-D");
+
+ /* Page 0: reg 0 - 15 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x01, 0x06E0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x07F1); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x0C72); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0AE6); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x00CA); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x08, 0x0E1C); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x09, 0x02F0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x09D0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0B, 0x01BA); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0C, 0x0640); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0020); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0990); mdelay(1);
+ /* page 1: reg 16-30 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x0806); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x03A7); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x059B); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0081); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x01A0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0B, 0x0418); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0C, 0x0FBE); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(1);
+ if (d_cut)
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0807);
+ else
+ rtl8187se_rf_writereg(dev, 0x0E, 0x0806);
+ mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x00, 0x01D7); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x03, 0x0E00); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0E50); mdelay(1);
+
+ rtl8187se_write_rf_gain(dev);
+
+ rtl8187se_rf_writereg(dev, 0x05, 0x0203); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x0200); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x0220); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x07, 0x03E0); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x06, 0x00C1); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+ if (priv->xtal_cal) {
+ tmp = (priv->xtal_in << 4) | (priv->xtal_out << 1) |
+ (1 << 11) | (1 << 9);
+ rtl8187se_rf_writereg(dev, 0x0F, tmp);
+ wiphy_info(dev->wiphy, "Xtal cal\n");
+ mdelay(1);
+ } else {
+ wiphy_info(dev->wiphy, "NO Xtal cal\n");
+ rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC);
+ mdelay(1);
+ }
+ /* page 0 */
+ rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
+
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x01, 0x0000); mdelay(1);
+ rtl8187se_rf_writereg(dev, 0x02, 0x0000); mdelay(1);
+ /* power save parameters */
+ /* TODO: move to dev.c */
+ rtl818x_iowrite8(priv, REG_ADDR1(0x024E),
+ rtl818x_ioread8(priv, REG_ADDR1(0x24E)) & 0x9F);
+ rtl8225se_write_phy_cck(dev, 0x00, 0xC8);
+ rtl8225se_write_phy_cck(dev, 0x06, 0x1C);
+ rtl8225se_write_phy_cck(dev, 0x10, 0x78);
+ rtl8225se_write_phy_cck(dev, 0x2E, 0xD0);
+ rtl8225se_write_phy_cck(dev, 0x2F, 0x06);
+ rtl8225se_write_phy_cck(dev, 0x01, 0x46);
+
+ /* power control */
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10);
+ rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B);
+
+ rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+ rtl8225se_write_phy_ofdm(dev, 0x00, 0x12);
+
+ rtl8225se_write_zebra_agc(dev);
+
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+
+ rtl8187se_write_ofdm_config(dev);
+
+ /* turn on RF */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+ /* turn on RF again */
+ rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+ rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+ /* turn on BB */
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x40);
+ rtl8225se_write_phy_ofdm(dev, 0x12, 0x40);
+
+ rtl8187se_write_initial_gain(dev, 4);
+}
+
+void rtl8225se_rf_stop(struct ieee80211_hw *dev)
+{
+ /* checked for 8187se */
+ struct rtl8180_priv *priv = dev->priv;
+
+ /* turn off BB RXIQ matrix to cut off rx signal */
+ rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+ rtl8225se_write_phy_ofdm(dev, 0x12, 0x00);
+ /* turn off RF */
+ rtl8187se_rf_writereg(dev, 0x04, 0x0000);
+ rtl8187se_rf_writereg(dev, 0x00, 0x0000);
+
+ usleep_range(1000, 5000);
+ /* turn off A/D and D/A */
+ rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_OFF);
+ rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_OFF);
+}
+
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+ struct ieee80211_conf *conf)
+{
+ int chan =
+ ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
+
+ rtl8225sez2_rf_set_tx_power(dev, chan);
+ rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+ if ((rtl8187se_rf_readreg(dev, 0x7) & 0x0F80) !=
+ rtl8225se_chan[chan - 1])
+ rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+ usleep_range(10000, 20000);
+}
+
+static const struct rtl818x_rf_ops rtl8225se_ops = {
+ .name = "rtl8225-se",
+ .init = rtl8225se_rf_init,
+ .stop = rtl8225se_rf_stop,
+ .set_chan = rtl8225se_rf_set_channel,
+};
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *dev)
+{
+ return &rtl8225se_ops;
+}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
new file mode 100644
index 000000000000..229400264088
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
@@ -0,0 +1,61 @@
+
+/* Definitions for RTL8187SE hardware
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187SE_RTL8225_H
+#define RTL8187SE_RTL8225_H
+
+#define RTL8225SE_ANAPARAM_ON 0xb0054d00
+#define RTL8225SE_ANAPARAM2_ON 0x000004c6
+
+/* all off except PLL */
+#define RTL8225SE_ANAPARAM_OFF 0xb0054dec
+/* all on including PLL */
+#define RTL8225SE_ANAPARAM_OFF2 0xb0054dfc
+
+#define RTL8225SE_ANAPARAM2_OFF 0x00ff04c6
+
+#define RTL8225SE_ANAPARAM3 0x10
+
+enum rtl8187se_power_state {
+ RTL8187SE_POWER_ON,
+ RTL8187SE_POWER_OFF,
+ RTL8187SE_POWER_SLEEP
+};
+
+static inline void rtl8225se_write_phy_ofdm(struct ieee80211_hw *dev,
+ u8 addr, u8 data)
+{
+ rtl8180_write_phy(dev, addr, data);
+}
+
+static inline void rtl8225se_write_phy_cck(struct ieee80211_hw *dev,
+ u8 addr, u8 data)
+{
+ rtl8180_write_phy(dev, addr, data | 0x10000);
+}
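+
The two inline helpers above differ only in bit 16 of the value passed to rtl8180_write_phy(): with the bit set the write is steered to the CCK PHY page, with it clear the OFDM page is addressed. A small standalone sketch of that selection (the enum and function are illustrative, not driver API):

#include <stdint.h>

enum phy_page { PHY_PAGE_OFDM, PHY_PAGE_CCK };	/* illustrative */

/* Build the value such a helper would pass down: bit 16 set selects
 * the CCK page, otherwise the OFDM page is addressed. */
static uint32_t phy_write_value(enum phy_page page, uint8_t data)
{
	uint32_t val = data;

	if (page == PHY_PAGE_CCK)
		val |= 0x10000;
	return val;
}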
+
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *);
+void rtl8225se_rf_stop(struct ieee80211_hw *dev);
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+ struct ieee80211_conf *conf);
+void rtl8225se_rf_conf_erp(struct ieee80211_hw *dev,
+ struct ieee80211_bss_conf *info);
+void rtl8225se_rf_init(struct ieee80211_hw *dev);
+
+#endif /* RTL8187SE_RTL8225_H */
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index fd78df813a85..0ca17cda48fa 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -592,7 +592,7 @@ static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
if (priv->is_rtl8187b)
- rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
+ rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3);
reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
/* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -1636,10 +1636,10 @@ static int rtl8187_probe(struct usb_interface *intf,
err_free_dmabuf:
kfree(priv->io_dmabuf);
- err_free_dev:
- ieee80211_free_hw(dev);
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
+ err_free_dev:
+ ieee80211_free_hw(dev);
return err;
}
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index ce23dfd42381..45ea4e1c4abe 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -16,30 +16,82 @@
#define RTL818X_H
struct rtl818x_csr {
+
u8 MAC[6];
u8 reserved_0[2];
- __le32 MAR[2];
- u8 RX_FIFO_COUNT;
- u8 reserved_1;
- u8 TX_FIFO_COUNT;
- u8 BQREQ;
- u8 reserved_2[4];
+
+ union {
+ __le32 MAR[2]; /* 0x8 */
+
+ struct{ /* rtl8187se */
+ u8 rf_sw_config; /* 0x8 */
+ u8 reserved_01[3];
+ __le32 TMGDA; /* 0xc */
+ } __packed;
+ } __packed;
+
+ union { /* 0x10 */
+ struct {
+ u8 RX_FIFO_COUNT;
+ u8 reserved_1;
+ u8 TX_FIFO_COUNT;
+ u8 BQREQ;
+ } __packed;
+
+ __le32 TBKDA; /* for 8187se */
+ } __packed;
+
+ __le32 TBEDA; /* 0x14 - for rtl8187se */
+
__le32 TSFT[2];
- __le32 TLPDA;
- __le32 TNPDA;
- __le32 THPDA;
- __le16 BRSR;
- u8 BSSID[6];
- u8 RESP_RATE;
- u8 EIFS;
- u8 reserved_3[1];
- u8 CMD;
+
+ union { /* 0x20 */
+ __le32 TLPDA;
+ __le32 TVIDA; /* for 8187se */
+ } __packed;
+
+ union { /* 0x24 */
+ __le32 TNPDA;
+ __le32 TVODA; /* for 8187se */
+ } __packed;
+
+ /* hi pri ring for all cards */
+ __le32 THPDA; /* 0x28 */
+
+ union { /* 0x2c */
+ struct {
+ u8 reserved_2a;
+ u8 EIFS_8187SE;
+ } __packed;
+
+ __le16 BRSR;
+ } __packed;
+
+ u8 BSSID[6]; /* 0x2e */
+
+ union { /* 0x34 */
+ struct {
+ u8 RESP_RATE;
+ u8 EIFS;
+ } __packed;
+ __le16 BRSR_8187SE;
+ } __packed;
+
+ u8 reserved_3[1]; /* 0x36 */
+ u8 CMD; /* 0x37 */
#define RTL818X_CMD_TX_ENABLE (1 << 2)
#define RTL818X_CMD_RX_ENABLE (1 << 3)
#define RTL818X_CMD_RESET (1 << 4)
- u8 reserved_4[4];
- __le16 INT_MASK;
- __le16 INT_STATUS;
+ u8 reserved_4[4]; /* 0x38 */
+ union {
+ struct {
+ __le16 INT_MASK;
+ __le16 INT_STATUS;
+ } __packed;
+
+ __le32 INT_STATUS_SE; /* 0x3c */
+ } __packed;
+/* status bits for rtl8187 and rtl8180/8185 */
#define RTL818X_INT_RX_OK (1 << 0)
#define RTL818X_INT_RX_ERR (1 << 1)
#define RTL818X_INT_TXL_OK (1 << 2)
@@ -56,7 +108,34 @@ struct rtl818x_csr {
#define RTL818X_INT_BEACON (1 << 13)
#define RTL818X_INT_TIME_OUT (1 << 14)
#define RTL818X_INT_TX_FO (1 << 15)
- __le32 TX_CONF;
+/* status bits for rtl8187se */
+#define RTL818X_INT_SE_TIMER3 (1 << 0)
+#define RTL818X_INT_SE_TIMER2 (1 << 1)
+#define RTL818X_INT_SE_RQ0SOR (1 << 2)
+#define RTL818X_INT_SE_TXBED_OK (1 << 3)
+#define RTL818X_INT_SE_TXBED_ERR (1 << 4)
+#define RTL818X_INT_SE_TXBE_OK (1 << 5)
+#define RTL818X_INT_SE_TXBE_ERR (1 << 6)
+#define RTL818X_INT_SE_RX_OK (1 << 7)
+#define RTL818X_INT_SE_RX_ERR (1 << 8)
+#define RTL818X_INT_SE_TXL_OK (1 << 9)
+#define RTL818X_INT_SE_TXL_ERR (1 << 10)
+#define RTL818X_INT_SE_RX_DU (1 << 11)
+#define RTL818X_INT_SE_RX_FIFO (1 << 12)
+#define RTL818X_INT_SE_TXN_OK (1 << 13)
+#define RTL818X_INT_SE_TXN_ERR (1 << 14)
+#define RTL818X_INT_SE_TXH_OK (1 << 15)
+#define RTL818X_INT_SE_TXH_ERR (1 << 16)
+#define RTL818X_INT_SE_TXB_OK (1 << 17)
+#define RTL818X_INT_SE_TXB_ERR (1 << 18)
+#define RTL818X_INT_SE_ATIM_TO (1 << 19)
+#define RTL818X_INT_SE_BK_TO (1 << 20)
+#define RTL818X_INT_SE_TIMER1 (1 << 21)
+#define RTL818X_INT_SE_TX_FIFO (1 << 22)
+#define RTL818X_INT_SE_WAKEUP (1 << 23)
+#define RTL818X_INT_SE_BK_DMA (1 << 24)
+#define RTL818X_INT_SE_TMGD_OK (1 << 30)
+ __le32 TX_CONF; /* 0x40 */
#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17)
#define RTL818X_TX_CONF_LOOPBACK_CONT (3 << 17)
#define RTL818X_TX_CONF_NO_ICV (1 << 19)
@@ -68,6 +147,7 @@ struct rtl818x_csr {
#define RTL818X_TX_CONF_R8185_D (5 << 25)
#define RTL818X_TX_CONF_R8187vD (5 << 25)
#define RTL818X_TX_CONF_R8187vD_B (6 << 25)
+#define RTL818X_TX_CONF_RTL8187SE (6 << 25)
#define RTL818X_TX_CONF_HWVER_MASK (7 << 25)
#define RTL818X_TX_CONF_DISREQQSIZE (1 << 28)
#define RTL818X_TX_CONF_PROBE_DTS (1 << 29)
@@ -122,31 +202,67 @@ struct rtl818x_csr {
u8 PGSELECT;
u8 SECURITY;
__le32 ANAPARAM2;
- u8 reserved_10[12];
- __le16 BEACON_INTERVAL;
- __le16 ATIM_WND;
- __le16 BEACON_INTERVAL_TIME;
- __le16 ATIMTR_INTERVAL;
- u8 PHY_DELAY;
- u8 CARRIER_SENSE_COUNTER;
- u8 reserved_11[2];
- u8 PHY[4];
- __le16 RFPinsOutput;
- __le16 RFPinsEnable;
- __le16 RFPinsSelect;
- __le16 RFPinsInput;
- __le32 RF_PARA;
- __le32 RF_TIMING;
- u8 GP_ENABLE;
- u8 GPIO0;
- u8 GPIO1;
- u8 reserved_12;
- __le32 HSSI_PARA;
- u8 reserved_13[4];
- u8 TX_AGC_CTL;
-#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0)
-#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1)
-#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
+ u8 reserved_10[8];
+ __le32 IMR; /* 0x6c - Interrupt mask reg for 8187se */
+#define IMR_TMGDOK ((1 << 30))
+#define IMR_DOT11HINT ((1 << 25)) /* 802.11h Measurement Interrupt */
+#define IMR_BCNDMAINT ((1 << 24)) /* Beacon DMA Interrupt */
+#define IMR_WAKEINT ((1 << 23)) /* Wake Up Interrupt */
+#define IMR_TXFOVW ((1 << 22)) /* Tx FIFO Overflow */
+#define IMR_TIMEOUT1 ((1 << 21)) /* Time Out Interrupt 1 */
+#define IMR_BCNINT ((1 << 20)) /* Beacon Time out */
+#define IMR_ATIMINT ((1 << 19)) /* ATIM Time Out */
+#define IMR_TBDER ((1 << 18)) /* Tx Beacon Descriptor Error */
+#define IMR_TBDOK ((1 << 17)) /* Tx Beacon Descriptor OK */
+#define IMR_THPDER ((1 << 16)) /* Tx High Priority Descriptor Error */
+#define IMR_THPDOK ((1 << 15)) /* Tx High Priority Descriptor OK */
+#define IMR_TVODER ((1 << 14)) /* Tx AC_VO Descriptor Error Int */
+#define IMR_TVODOK ((1 << 13)) /* Tx AC_VO Descriptor OK Interrupt */
+#define IMR_FOVW ((1 << 12)) /* Rx FIFO Overflow Interrupt */
+#define IMR_RDU ((1 << 11)) /* Rx Descriptor Unavailable */
+#define IMR_TVIDER ((1 << 10)) /* Tx AC_VI Descriptor Error */
+#define IMR_TVIDOK ((1 << 9)) /* Tx AC_VI Descriptor OK Interrupt */
+#define IMR_RER ((1 << 8)) /* Rx Error Interrupt */
+#define IMR_ROK ((1 << 7)) /* Receive OK Interrupt */
+#define IMR_TBEDER ((1 << 6)) /* Tx AC_BE Descriptor Error */
+#define IMR_TBEDOK ((1 << 5)) /* Tx AC_BE Descriptor OK */
+#define IMR_TBKDER ((1 << 4)) /* Tx AC_BK Descriptor Error */
+#define IMR_TBKDOK ((1 << 3)) /* Tx AC_BK Descriptor OK */
+#define IMR_RQOSOK ((1 << 2)) /* Rx QoS OK Interrupt */
+#define IMR_TIMEOUT2 ((1 << 1)) /* Time Out Interrupt 2 */
+#define IMR_TIMEOUT3 ((1 << 0)) /* Time Out Interrupt 3 */
+ __le16 BEACON_INTERVAL; /* 0x70 */
+ __le16 ATIM_WND; /* 0x72 */
+ __le16 BEACON_INTERVAL_TIME; /* 0x74 */
+ __le16 ATIMTR_INTERVAL; /* 0x76 */
+ u8 PHY_DELAY; /* 0x78 */
+ u8 CARRIER_SENSE_COUNTER; /* 0x79 */
+ u8 reserved_11[2]; /* 0x7a */
+ u8 PHY[4]; /* 0x7c */
+ __le16 RFPinsOutput; /* 0x80 */
+ __le16 RFPinsEnable; /* 0x82 */
+ __le16 RFPinsSelect; /* 0x84 */
+ __le16 RFPinsInput; /* 0x86 */
+ __le32 RF_PARA; /* 0x88 */
+ __le32 RF_TIMING; /* 0x8c */
+ u8 GP_ENABLE; /* 0x90 */
+ u8 GPIO0; /* 0x91 */
+ u8 GPIO1; /* 0x92 */
+ u8 TPPOLL_STOP; /* 0x93 - rtl8187se only */
+#define RTL818x_TPPOLL_STOP_BQ (1 << 7)
+#define RTL818x_TPPOLL_STOP_VI (1 << 4)
+#define RTL818x_TPPOLL_STOP_VO (1 << 5)
+#define RTL818x_TPPOLL_STOP_BE (1 << 3)
+#define RTL818x_TPPOLL_STOP_BK (1 << 2)
+#define RTL818x_TPPOLL_STOP_MG (1 << 1)
+#define RTL818x_TPPOLL_STOP_HI (1 << 6)
+
+ __le32 HSSI_PARA; /* 0x94 */
+ u8 reserved_13[4]; /* 0x98 */
+ u8 TX_AGC_CTL; /* 0x9c */
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN (1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL (1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
u8 TX_GAIN_CCK;
u8 TX_GAIN_OFDM;
u8 TX_ANTENNA;
@@ -158,8 +274,8 @@ struct rtl818x_csr {
u8 SLOT;
u8 reserved_16[5];
u8 CW_CONF;
-#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0)
-#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1)
+#define RTL818X_CW_CONF_PERPACKET_CW (1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY (1 << 1)
u8 CW_VAL;
u8 RATE_FALLBACK;
#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7)
@@ -167,7 +283,8 @@ struct rtl818x_csr {
u8 reserved_17[24];
u8 CONFIG5;
u8 TX_DMA_POLLING;
- u8 reserved_18[2];
+ u8 PHY_PR;
+ u8 reserved_18;
__le16 CWR;
u8 RETRY_CTR;
u8 reserved_19[3];
@@ -179,20 +296,64 @@ struct rtl818x_csr {
__le32 RDSAR;
__le16 TID_AC_MAP;
u8 reserved_20[4];
- u8 ANAPARAM3;
- u8 reserved_21[5];
- __le16 FEMR;
- u8 reserved_22[4];
- __le16 TALLY_CNT;
- u8 TALLY_SEL;
+ union {
+ __le16 ANAPARAM3; /* 0xee */
+ u8 ANAPARAM3A; /* for rtl8187 */
+ };
+
+#define AC_PARAM_TXOP_LIMIT_SHIFT 16
+#define AC_PARAM_ECW_MAX_SHIFT 12
+#define AC_PARAM_ECW_MIN_SHIFT 8
+#define AC_PARAM_AIFS_SHIFT 0
+
+ __le32 AC_VO_PARAM; /* 0xf0 */
+
+ union { /* 0xf4 */
+ __le32 AC_VI_PARAM;
+ __le16 FEMR;
+ } __packed;
+
+ union{ /* 0xf8 */
+ __le32 AC_BE_PARAM; /* rtl8187se */
+ struct{
+ u8 reserved_21[2];
+ __le16 TALLY_CNT; /* 0xfa */
+ } __packed;
+ } __packed;
+
+ union {
+ u8 TALLY_SEL; /* 0xfc */
+ __le32 AC_BK_PARAM;
+
+ } __packed;
+
} __packed;
+/* These are addresses with NON-standard usage.
+ * Their offsets lie far beyond the end of this struct, so rather than
+ * padding it with a ton of "reserved" fields they are addressed directly.
+ * They are used by the RTL8187SE.
+ */
+#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
+#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
+#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
+
+#define FEMR_SE REG_ADDR2(0x1D4)
+#define ARFR REG_ADDR2(0x1E0)
+#define RFSW_CTRL REG_ADDR2(0x272)
+#define SW_3W_DB0 REG_ADDR2(0x274)
+#define SW_3W_DB0_4 REG_ADDR4(0x274)
+#define SW_3W_DB1 REG_ADDR2(0x278)
+#define SW_3W_DB1_4 REG_ADDR4(0x278)
+#define SW_3W_CMD1 REG_ADDR1(0x27D)
+#define PI_DATA_REG REG_ADDR2(0x360)
+#define SI_DATA_REG REG_ADDR2(0x362)
+
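The REG_ADDR1/2/4 macros above turn a raw byte offset into a typed __iomem pointer relative to priv->map, shifting the offset by the access width so the pointer arithmetic lands on the intended register. A standalone sketch of the same pointer math on an ordinary buffer (no MMIO involved; the names are illustrative):

#include <stdint.h>

/* Same arithmetic as REG_ADDR2()/REG_ADDR4(): a 16-bit register at byte
 * offset 0x274 is element 0x274 >> 1 of a uint16_t array at the base. */
static volatile uint16_t *reg_addr2(volatile void *base, unsigned int offset)
{
	return (volatile uint16_t *)base + (offset >> 1);
}

static volatile uint32_t *reg_addr4(volatile void *base, unsigned int offset)
{
	return (volatile uint32_t *)base + (offset >> 2);
}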
struct rtl818x_rf_ops {
char *name;
void (*init)(struct ieee80211_hw *);
void (*stop)(struct ieee80211_hw *);
void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
- void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
u8 (*calc_rssi)(u8 agc, u8 sq);
};
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index c2ffce7a907c..bf3cf124e4ea 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -5,7 +5,7 @@ menuconfig RTL_CARDS
---help---
This option will enable support for the Realtek mac80211-based
wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
- rtl8723eu, and rtl8188eu share some common code.
+ rtl8723ae, rtl8723be, and rtl8188ae share some common code.
if RTL_CARDS
@@ -48,12 +48,27 @@ config RTL8723AE
depends on PCI
select RTLWIFI
select RTLWIFI_PCI
+ select RTL8723_COMMON
+ select RTLBTCOEXIST
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
If you choose to build it as a module, it will be called rtl8723ae
+config RTL8723BE
+ tristate "Realtek RTL8723BE PCIe Wireless Network Adapter"
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
+ select RTL8723_COMMON
+ select RTLBTCOEXIST
+ ---help---
+ This is the driver for Realtek RTL8723BE 802.11n PCIe
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8723be
+
config RTL8188EE
tristate "Realtek RTL8188EE Wireless Network Adapter"
depends on PCI
@@ -101,4 +116,14 @@ config RTL8192C_COMMON
depends on RTL8192CE || RTL8192CU
default y
+config RTL8723_COMMON
+ tristate
+ depends on RTL8723AE || RTL8723BE
+ default y
+
+config RTLBTCOEXIST
+ tristate
+ depends on RTL8723AE || RTL8723BE
+ default y
+
endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index d56f023a4b90..bba36a06abcc 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -24,6 +24,9 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
obj-$(CONFIG_RTL8192DE) += rtl8192de/
obj-$(CONFIG_RTL8723AE) += rtl8723ae/
+obj-$(CONFIG_RTL8723BE) += rtl8723be/
obj-$(CONFIG_RTL8188EE) += rtl8188ee/
+obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/
+obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
new file mode 100644
index 000000000000..47ceecfcb7dc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
@@ -0,0 +1,7 @@
+btcoexist-objs := halbtc8723b2ant.o \
+ halbtcoutsrc.o \
+ rtl_btc.o
+
+obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
new file mode 100644
index 000000000000..d76684eb24d0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#ifndef __HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+
+#include "halbtcoutsrc.h"
+
+#include "halbtc8723b2ant.h"
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+#endif /* __HALBT_PRECOMP_H__ */
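+
The BIT0..BIT31 constants above spell out single-bit masks by hand; they amount to shifting 1 left by the bit index, which is also how the kernel's own BIT() macro is defined. A one-line standalone equivalent (MY_BIT is an illustrative name, not a kernel macro):

/* Same single-bit masks, generated rather than enumerated. */
#define MY_BIT(n) (1U << (n))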
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
new file mode 100644
index 000000000000..d916ab9f3c38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -0,0 +1,3698 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+static const char *const glbt_info_src_8723b_2ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+static u32 glcoex_ver_date_8723b_2ant = 20130731;
+static u32 glcoex_ver_8723b_2ant = 0x3b;
+
+/**************************************************************
+ * local function proto type if needed
+ **************************************************************/
+/**************************************************************
+ * local function start with btc8723b2ant_
+ **************************************************************/
+static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+ u8 rssi_thresh1)
+{
+ s32 bt_rssi = 0;
+ u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+ bt_rssi = coex_sta->bt_rssi;
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
+ return coex_sta->pre_bt_rssi_state;
+ }
+
+ if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+ if (bt_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_bt_rssi_state ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (bt_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ bt_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to High\n");
+ } else if (bt_rssi < rssi_thresh) {
+ bt_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Low\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (bt_rssi < rssi_thresh1) {
+ bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "switch to Medium\n");
+ } else {
+ bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+ return bt_rssi_state;
+}
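+
btc8723b2ant_bt_rssi_state() applies hysteresis: starting from a LOW state the RSSI must reach the threshold plus a tolerance before the state switches HIGH, while from a HIGH state it must drop below the bare threshold before switching LOW, so the reported state does not flap around the threshold. A reduced two-level model of that logic (standalone; the tolerance here is a placeholder, not the driver's BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT value):

#define RSSI_TOL 2	/* placeholder margin, not the driver's constant */

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* Two-level version of the hysteresis above: climb only past
 * thresh + tolerance, fall back only below the bare threshold. */
static enum rssi_state rssi_update(enum rssi_state prev, int rssi, int thresh)
{
	if (prev == RSSI_LOW)
		return (rssi >= thresh + RSSI_TOL) ? RSSI_HIGH : RSSI_LOW;
	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}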
+
+static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+ u8 index, u8 level_num,
+ u8 rssi_thresh, u8 rssi_thresh1)
+{
+ s32 wifi_rssi = 0;
+ u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+ if (level_num == 2) {
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ } else if (level_num == 3) {
+ if (rssi_thresh > rssi_thresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
+ return coex_sta->pre_wifi_rssi_state[index];
+ }
+
+ if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_LOW) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_LOW)) {
+ if (wifi_rssi >= rssi_thresh +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Low\n");
+ }
+ } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_MEDIUM) ||
+ (coex_sta->pre_wifi_rssi_state[index] ==
+ BTC_RSSI_STATE_STAY_MEDIUM)) {
+ if (wifi_rssi >= rssi_thresh1 +
+ BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+ wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to High\n");
+ } else if (wifi_rssi < rssi_thresh) {
+ wifi_rssi_state = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Low\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at Medium\n");
+ }
+ } else {
+ if (wifi_rssi < rssi_thresh1) {
+ wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "switch to Medium\n");
+ } else {
+ wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state "
+ "stay at High\n");
+ }
+ }
+ }
+
+ coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+ return wifi_rssi_state;
+}
+
+static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+ u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+ u32 reg_hp_tx = 0, reg_hp_rx = 0;
+ u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+ reg_hp_txrx = 0x770;
+ reg_lp_txrx = 0x774;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+ reg_hp_tx = u32tmp & MASKLWORD;
+ reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+ reg_lp_tx = u32tmp & MASKLWORD;
+ reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+ coex_sta->high_priority_tx = reg_hp_tx;
+ coex_sta->high_priority_rx = reg_hp_rx;
+ coex_sta->low_priority_tx = reg_lp_tx;
+ coex_sta->low_priority_rx = reg_lp_rx;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+ /* reset counter */
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
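+
btc8723b2ant_monitor_bt_ctr() reads the two counter registers at 0x770 and 0x774 and splits each into a TX count (low 16 bits) and an RX count (high 16 bits) before resetting the counters via register 0x76e. A minimal standalone sketch of that split, assuming MASKLWORD/MASKHWORD are the 0x0000ffff/0xffff0000 masks the shifts suggest:

#include <stdint.h>

struct txrx_cnt {	/* illustrative only */
	uint16_t tx;
	uint16_t rx;
};

static struct txrx_cnt split_txrx(uint32_t reg)
{
	struct txrx_cnt c;

	c.tx = reg & 0x0000ffff;		/* low word: TX counter */
	c.rx = (reg & 0xffff0000) >> 16;	/* high word: RX counter */
	return c;
}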
+
+static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+ static bool pre_wifi_busy;
+ static bool pre_under_4way;
+ static bool pre_bt_hs_on;
+ bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+ bool wifi_connected = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ &under_4way);
+
+ if (wifi_connected) {
+ if (wifi_busy != pre_wifi_busy) {
+ pre_wifi_busy = wifi_busy;
+ return true;
+ }
+
+ if (under_4way != pre_under_4way) {
+ pre_under_4way = under_4way;
+ return true;
+ }
+
+ if (bt_hs_on != pre_bt_hs_on) {
+ pre_bt_hs_on = bt_hs_on;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+ /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+ bt_link_info->sco_exist = coex_sta->sco_exist;
+ bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+ bt_link_info->pan_exist = coex_sta->pan_exist;
+ bt_link_info->hid_exist = coex_sta->hid_exist;
+
+ /* work around for HS mode. */
+ if (bt_hs_on) {
+ bt_link_info->pan_exist = true;
+ bt_link_info->bt_link_exist = true;
+ }
+#else /* profile from bt stack */
+ bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+ bt_link_info->sco_exist = stack_info->sco_exist;
+ bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+ bt_link_info->pan_exist = stack_info->pan_exist;
+ bt_link_info->hid_exist = stack_info->hid_exist;
+
+	/* work around the Win-8 stack HID report error */
+	if (!stack_info->hid_exist)
+		stack_info->hid_exist = coex_sta->hid_exist;
+	/* sync BTInfo with the BT firmware and stack: when the stack HID
+	 * report is wrong, use the info from the BT firmware instead.
+	 */
+	if (!stack_info->bt_link_exist)
+		stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+ /* check if Sco only */
+ if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->sco_only = true;
+ else
+ bt_link_info->sco_only = false;
+
+ /* check if A2dp only */
+ if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->a2dp_only = true;
+ else
+ bt_link_info->a2dp_only = false;
+
+ /* check if Pan only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ bt_link_info->pan_exist && !bt_link_info->hid_exist)
+ bt_link_info->pan_only = true;
+ else
+ bt_link_info->pan_only = false;
+
+ /* check if Hid only */
+ if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+ !bt_link_info->pan_exist && bt_link_info->hid_exist)
+ bt_link_info->hid_only = true;
+ else
+ bt_link_info->hid_only = false;
+}
+
+static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ bool bt_hs_on = false;
+ u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+ u8 num_of_diff_profile = 0;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+ if (!bt_link_info->bt_link_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
+ return algorithm;
+ }
+
+ if (bt_link_info->sco_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->hid_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->pan_exist)
+ num_of_diff_profile++;
+ if (bt_link_info->a2dp_exist)
+ num_of_diff_profile++;
+
+ if (num_of_diff_profile == 1) {
+ if (bt_link_info->sco_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 2) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (num_of_diff_profile == 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->a2dp_exist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP"
+ " ==> HID\n");
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + "
+ "PAN(EDR) ==> HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(HS)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], HID + A2DP + "
+ "PAN(EDR)\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (num_of_diff_profile >= 3) {
+ if (bt_link_info->sco_exist) {
+ if (bt_link_info->hid_exist &&
+ bt_link_info->pan_exist &&
+ bt_link_info->a2dp_exist) {
+ if (bt_hs_on) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID"
+ " + A2DP + PAN(HS)\n");
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP +"
+ " PAN(EDR)==>PAN(EDR)+HID\n");
+ algorithm =
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+ return algorithm;
+}
+
+static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
+{
+ bool ret = false;
+ bool bt_hs_on = false, wifi_connected = false;
+ s32 bt_hs_rssi = 0;
+ u8 bt_rssi_state;
+
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected))
+ return false;
+ if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+ return false;
+
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ if (wifi_connected) {
+ if (bt_hs_on) {
+ if (bt_hs_rssi > 37) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for HS mode!!\n");
+ ret = true;
+ }
+ } else {
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt "
+ "power for Wifi is connected!!\n");
+ ret = true;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+ u8 dac_swing_lvl)
+{
+ u8 h2c_parameter[1] = {0};
+
+	/* There are several types of dac swing level:
+	 * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+	 */
+ h2c_parameter[0] = dac_swing_lvl;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool dec_bt_pwr)
+{
+ u8 h2c_parameter[1] = {0};
+
+ h2c_parameter[0] = 0;
+
+ if (dec_bt_pwr)
+ h2c_parameter[0] |= BIT1;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+ bool force_exec, bool dec_bt_pwr)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+ if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+ return;
+ }
+ btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+ coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
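+
btc8723b2ant_dec_bt_pwr() follows the write-on-change pattern used throughout this file: the requested value is stored in a cur_* field, and unless force_exec is set the firmware write is skipped when it matches the cached pre_* value; after a real write the cache is updated. A generic standalone sketch of that pattern (the names are illustrative):

#include <stdbool.h>

struct cached_setting {	/* illustrative only */
	bool pre;
	bool cur;
};

/* Commit a new value only when it differs from the cached one,
 * unless force_exec demands an unconditional write. */
static void apply_setting(struct cached_setting *s, bool force_exec,
			  bool value, void (*commit)(bool))
{
	s->cur = value;
	if (!force_exec && s->pre == s->cur)
		return;
	commit(s->cur);
	s->pre = s->cur;
}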
+
+static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+ bool force_exec, u8 fw_dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, "
+ "curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
+
+ if (coex_dm->pre_fw_dac_swing_lvl ==
+ coex_dm->cur_fw_dac_swing_lvl)
+ return;
+ }
+
+ btc8723b2ant_set_fw_dac_swing_level(btcoexist,
+ coex_dm->cur_fw_dac_swing_lvl);
+ coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
+static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+ bool rx_rf_shrink_on)
+{
+ if (rx_rf_shrink_on) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner */
+ /* After initialized, we can use coex_dm->btRf0x1eBackup */
+ if (btcoexist->initilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+ 0xfffff,
+ coex_dm->bt_rf0x1e_backup);
+ }
+ }
+}
+
+static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+ bool force_exec, bool rx_rf_shrink_on)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+ "ON" : "OFF"));
+ coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, "
+ "bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ if (coex_dm->pre_rf_rx_lpf_shrink ==
+ coex_dm->cur_rf_rx_lpf_shrink)
+ return;
+ }
+ btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+ coex_dm->cur_rf_rx_lpf_shrink);
+
+ coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
+static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
+ bool low_penalty_ra)
+{
+ u8 h2c_parameter[6] = {0};
+
+ h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
+
+ if (low_penalty_ra) {
+ h2c_parameter[1] |= BIT0;
+ /*normal rate except MCS7/6/5, OFDM54/48/36*/
+ h2c_parameter[2] = 0x00;
+ h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/
+ h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/
+ h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+ bool force_exec, bool low_penalty_ra)
+{
+ /*return; */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
+ coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, "
+ "bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
+
+ if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+ return;
+ }
+ btc8723b_set_penalty_txrate(btcoexist, coex_dm->cur_low_penalty_ra);
+
+ coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
+static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+ u32 level)
+{
+ u8 val = (u8) level;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
+ bool sw_dac_swing_on,
+ u32 sw_dac_swing_lvl)
+{
+ if (sw_dac_swing_on)
+ btc8723b2ant_set_dac_swing_reg(btcoex, sw_dac_swing_lvl);
+ else
+ btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
+}
+
+
+static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+ bool force_exec, bool dac_swing_on,
+ u32 dac_swing_lvl)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ coex_dm->cur_dac_swing_on = dac_swing_on;
+ coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+ " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
+
+ if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+ (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+ return;
+ }
+ mdelay(30);
+ btc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+ dac_swing_lvl);
+
+ coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+ coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
+static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+ bool agc_table_en)
+{
+ u8 rssi_adjust_val = 0;
+
+ /* BB AGC Gain Table */
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+ btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+ }
+
+ /* RF Gain */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+ if (agc_table_en) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38fff);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x380c3);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+ 0xfffff, 0x28ce6);
+ }
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+ /* set rssiAdjustVal for wifi module. */
+ if (agc_table_en)
+ rssi_adjust_val = 8;
+ btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ &rssi_adjust_val);
+}
+
+static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+ bool force_exec, bool agc_table_en)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (agc_table_en ? "Enable" : "Disable"));
+ coex_dm->cur_agc_table_en = agc_table_en;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+ if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+ return;
+ }
+ btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+ coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
+static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+ u32 val0x6c0, u32 val0x6c4,
+ u32 val0x6c8, u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+ bool force_exec, u32 val0x6c0,
+ u32 val0x6c4, u32 val0x6c8,
+ u8 val0x6cc)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+ " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0,
+ val0x6c4, val0x6c8, val0x6cc);
+ coex_dm->cur_val0x6c0 = val0x6c0;
+ coex_dm->cur_val0x6c4 = val0x6c4;
+ coex_dm->cur_val0x6c8 = val0x6c8;
+ coex_dm->cur_val0x6cc = val0x6cc;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, "
+ "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+ "preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, "
+ "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+ "curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+ if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+ (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+ (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+ (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+ return;
+ }
+ btc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
+
+ coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+ coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+ coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+ coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
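+/* Translate a coex-table "type" index into one of the pre-defined
+ * 0x6c0/0x6c4/0x6c8/0x6cc register presets.
+ */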
+static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
+ bool force_exec, u8 type)
+{
+ switch (type) {
+ case 0:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x55555555, 0xffff, 0x3);
+ break;
+ case 1:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 2:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 3:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+ 0xaaaaaaaa, 0xffff, 0x3);
+ break;
+ case 4:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+ 0xffffffff, 0xffff, 0x3);
+ break;
+ case 5:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+ 0x5fff5fff, 0xffff, 0x3);
+ break;
+ case 6:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 7:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 8:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 9:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 10:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5aff5aff, 0xffff, 0x3);
+ break;
+ case 11:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5a5f5a5f, 0xffff, 0x3);
+ break;
+ case 12:
+ btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+ 0x5f5f5f5f, 0xffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool enable)
+{
+ u8 h2c_parameter[1] = {0};
+
+ if (enable)
+ h2c_parameter[0] |= BIT0;/* function enable*/
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, "
+ "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+ bool force_exec, bool enable)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ coex_dm->cur_ignore_wlan_act = enable;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, "
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
+
+ if (coex_dm->pre_ignore_wlan_act ==
+ coex_dm->cur_ignore_wlan_act)
+ return;
+ }
+ btc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+ coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
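+/* Send the 5-byte PS TDMA parameter set to the firmware (H2C 0x60) and
+ * cache it in coex_dm->ps_tdma_para.
+ */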
+static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+ u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+ u8 h2c_parameter[5];
+
+ h2c_parameter[0] = byte1;
+ h2c_parameter[1] = byte2;
+ h2c_parameter[2] = byte3;
+ h2c_parameter[3] = byte4;
+ h2c_parameter[4] = byte5;
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
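+/* Only the RX LPF shrink and low-penalty-RA knobs are wired up here;
+ * limited_dig and bt_lna_constrain are accepted but not used yet.
+ */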
+static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+ bool shrink_rx_lpf, bool low_penalty_ra,
+ bool limited_dig, bool bt_lna_constrain)
+{
+ btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+ btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+ bool agc_table_shift, bool adc_backoff,
+ bool sw_dac_swing, u32 dac_swing_lvl)
+{
+ btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+ btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+ dac_swing_lvl);
+}
+
+static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+ bool turn_on, u8 type)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ if (!force_exec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+ if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+ (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+ return;
+ }
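+	/* each type below selects one pre-defined 5-byte TDMA pattern that
+	 * is handed to the firmware via btc8723b2ant_set_fw_ps_tdma()
+	 */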
+ if (turn_on) {
+ switch (type) {
+ case 1:
+ default:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+ 0x03, 0xf1, 0x90);
+ break;
+ case 5:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+ 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ case 10:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0xe1, 0x90);
+ break;
+ case 11:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0xe1, 0x90);
+ break;
+ case 12:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 13:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0x60, 0x90);
+ break;
+ case 14:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+ 0x12, 0x60, 0x90);
+ break;
+ case 15:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+ 0xa, 0x60, 0x90);
+ break;
+ case 16:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0x60, 0x90);
+ break;
+ case 17:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+ 0x2f, 0x60, 0x90);
+ break;
+ case 18:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+ 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+ 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+ 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+ 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ case 1:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x48, 0x0);
+ break;
+ default:
+ btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+ 0x40, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+ coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
+static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+ /* fw all off */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ /* sw all off */
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* force a reset of the coex mechanism */
+
+ btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
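+/* While BT is in inquiry/page scan, fall back to a fixed coex table and
+ * TDMA setting, and save register 0x948 so it can be restored once the
+ * scan finishes (see btc8723b2ant_run_coexist_mechanism()).
+ */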
+static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+ bool wifi_connected = false;
+ bool low_pwr_disable = true;
+
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+
+ if (wifi_connected) {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ } else {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ }
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+ coex_dm->need_recover_0x948 = true;
+ coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+}
+
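+/* Handle the "easy" cases (WiFi not connected, or BT idle) with a fixed
+ * low-interference configuration; returns true when no profile-specific
+ * action is required.
+ */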
+static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+ bool common = false, wifi_connected = false;
+ bool wifi_busy = false;
+ bool bt_hs_on = false, low_pwr_disable = false;
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+ if (!wifi_connected) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+ false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+ 0x18);
+
+ common = true;
+ } else {
+ if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = false;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT non connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+ coex_dm->bt_status) {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (bt_hs_on)
+ return false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi connected + "
+ "BT connected-idle!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 0xb);
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+ false);
+
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+
+ common = true;
+ } else {
+ low_pwr_disable = true;
+ btcoexist->btc_set(btcoexist,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ &low_pwr_disable);
+
+ if (wifi_busy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + "
+ "BT Busy!!\n");
+ common = false;
+ } else {
+ if (bt_hs_on)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + "
+ "BT Busy!!\n");
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+ 0x1, 0xfffff, 0x0);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
+ 7);
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 21);
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist,
+ NORMAL_EXEC,
+ 0xb);
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist,
+ NORMAL_EXEC,
+ false);
+ btc8723b2ant_sw_mechanism1(btcoexist, false,
+ false, false,
+ false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false,
+ false, false,
+ 0x18);
+ common = true;
+ }
+ }
+ }
+
+ return common;
+}
+
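+/* set_tdma_int1/2/3 first map the current PS TDMA index into the family
+ * that matches the requested max interval, then step the WiFi duration up
+ * (result == 1) or down (result == -1) within that family.
+ */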
+static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 1 */
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
+ coex_dm->tdma_adj_type = 71;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ coex_dm->tdma_adj_type = 9;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 71) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 1);
+ coex_dm->tdma_adj_type = 1;
+ } else if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ } else if (result == 1) {
+			int tmp = coex_dm->cur_ps_tdma;
+
+ switch (tmp) {
+ case 4:
+ case 3:
+ case 2:
+ case 12:
+ case 11:
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, tmp - 1);
+ coex_dm->tdma_adj_type = tmp - 1;
+ break;
+ case 1:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 71);
+ coex_dm->tdma_adj_type = 71;
+ break;
+ }
+ }
+ }
+}
+
+static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 2 */
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ }
+ }
+ }
+}
+
+static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
+ s32 result)
+{
+ /* Set PS TDMA for max interval == 3 */
+ if (tx_pause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
+ if (coex_dm->cur_ps_tdma == 1) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 2) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 3) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 4) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 9) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 10) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 11) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 12) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ if (result == -1) {
+ if (coex_dm->cur_ps_tdma == 5) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 8);
+ coex_dm->tdma_adj_type = 8;
+ } else if (coex_dm->cur_ps_tdma == 13) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 16);
+ coex_dm->tdma_adj_type = 16;
+ }
+ } else if (result == 1) {
+ if (coex_dm->cur_ps_tdma == 8) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 7) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 6) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else if (coex_dm->cur_ps_tdma == 16) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 15) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else if (coex_dm->cur_ps_tdma == 14) {
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
+ switch (coex_dm->cur_ps_tdma) {
+ case 5:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 6:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 7:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 8:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+ coex_dm->tdma_adj_type = 4;
+ break;
+ case 13:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 14:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 15:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 16:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+ coex_dm->tdma_adj_type = 12;
+ break;
+ }
+ if (result == -1) {
+ switch (coex_dm->cur_ps_tdma) {
+ case 1:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 2:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 3:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 4);
+ coex_dm->tdma_adj_type = 4;
+ break;
+ case 9:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 11:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 12);
+ coex_dm->tdma_adj_type = 12;
+ break;
+ }
+ } else if (result == 1) {
+ switch (coex_dm->cur_ps_tdma) {
+ case 4:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 3:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 2:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ break;
+ case 12:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 11:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ break;
+ case 10:
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+ true, 11);
+				coex_dm->tdma_adj_type = 11;
+				break;
+			}
+ }
+ }
+}
+
+static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+ bool sco_hid, bool tx_pause,
+ u8 max_interval)
+{
+ static s32 up, dn, m, n, wait_count;
+ /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
+ s32 result;
+ u8 retry_count = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
+
+ if (!coex_dm->auto_tdma_adjust) {
+ coex_dm->auto_tdma_adjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ if (sco_hid) {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 13);
+ coex_dm->tdma_adj_type = 13;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 14);
+ coex_dm->tdma_adj_type = 14;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 15);
+ coex_dm->tdma_adj_type = 15;
+ }
+ } else {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 9);
+ coex_dm->tdma_adj_type = 9;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 10);
+ coex_dm->tdma_adj_type = 10;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 11);
+ coex_dm->tdma_adj_type = 11;
+ }
+ }
+ } else {
+ if (tx_pause) {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 5);
+ coex_dm->tdma_adj_type = 5;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 6);
+ coex_dm->tdma_adj_type = 6;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 7);
+ coex_dm->tdma_adj_type = 7;
+ }
+ } else {
+ if (max_interval == 1) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 1);
+ coex_dm->tdma_adj_type = 1;
+ } else if (max_interval == 2) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 2);
+ coex_dm->tdma_adj_type = 2;
+ } else if (max_interval == 3) {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ } else {
+ btc8723b2ant_ps_tdma(btcoexist,
+ NORMAL_EXEC,
+ true, 3);
+ coex_dm->tdma_adj_type = 3;
+ }
+ }
+ }
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ wait_count = 0;
+ } else {
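+		/* adaptive part: "up" counts consecutive clean 2-second
+		 * polls and "dn" counts polls with a few BT retries; n
+		 * (= 3 * m) is the number of clean polls needed before the
+		 * WiFi duration is increased again, and m grows when
+		 * decreases happen quickly so the algorithm backs off.
+		 */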
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+ retry_count = coex_sta->bt_retry_cnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
+ result = 0;
+ wait_count++;
+ /* no retry in the last 2-second duration*/
+ if (retry_count == 0) {
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) {
+ wait_count = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi "
+ "duration!!\n");
+ } /* <=3 retry in the last 2-second duration*/
+ } else if (retry_count <= 3) {
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) {
+ if (wait_count <= 2)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+					  "for retry_count <= 3!!\n");
+ }
+ } else {
+ if (wait_count == 1)
+ m++;
+ else
+ m = 1;
+
+ if (m >= 20)
+ m = 20;
+
+ n = 3 * m;
+ up = 0;
+ dn = 0;
+ wait_count = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration "
+				  "for retry_count > 3!!\n");
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
+ if (max_interval == 1)
+ set_tdma_int1(btcoexist, tx_pause, result);
+ else if (max_interval == 2)
+ set_tdma_int2(btcoexist, tx_pause, result);
+ else if (max_interval == 3)
+ set_tdma_int3(btcoexist, tx_pause, result);
+ }
+
+	/* If the current PS TDMA does not match the recorded one (e.g. during
+	 * scan or DHCP), adjust it back to the previously recorded setting.
+	 */
+ if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+ bool scan = false, link = false, roam = false;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], PsTdma type mismatch!!!, "
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+ if (!scan && !link && !roam)
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+ coex_dm->tdma_adj_type);
+ else
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], roaming/link/scan is in"
+ " progress, will adjust next time!!!\n");
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ /*for SCO quality at 11b/g mode*/
+ if (BTC_WIFI_BW_LEGACY == wifi_bw)
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
+ else /*for SCO quality & wifi performance balance at 11n mode*/
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+
+ /*for voice quality */
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ true, 0x4);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ true, 0x4);
+ }
+ }
+}
+
+static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+		btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+	else /* for HID quality & wifi performance balance at 11n mode */
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+ else
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 1);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+ else
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+ /* sw mechanism */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(HS) only */
+static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ true, 3);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+ false, 3);
+ } else {
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 3);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x780);
+ } else {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+ 6);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+ 0xfffff, 0x0);
+ }
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ } else {
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+ 0x0);
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ true, 2);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+ false, 3);
+ } else {
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+ u8 wifi_rssi_state, bt_rssi_state;
+ u32 wifi_bw;
+
+ wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+ 0, 2, 15, 0);
+ bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+ btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+ if (btc8723b_need_dec_pwr(btcoexist))
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+ else
+ btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+ btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+ else
+ btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifi_bw) {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ } else {
+ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+ (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+ false, 0x18);
+ } else {
+ btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+ false, false);
+ btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+ false, 0x18);
+ }
+ }
+}
+
+static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), "
+ "return for Manual CTRL <===\n");
+ return;
+ }
+
+ if (coex_sta->under_ips) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
+ return;
+ }
+
+ algorithm = btc8723b2ant_action_algorithm(btcoexist);
+ if (coex_sta->c2h_bt_inquiry_page &&
+ (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc8723b2ant_action_bt_inquiry(btcoexist);
+ return;
+ } else {
+ if (coex_dm->need_recover_0x948) {
+ coex_dm->need_recover_0x948 = false;
+ btcoexist->btc_write_2byte(btcoexist, 0x948,
+ coex_dm->backup_0x948);
+ }
+ }
+
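+	/* remember the chosen algorithm and dispatch the matching action routine below */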
+ coex_dm->cur_algorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
+
+ if (btc8723b2ant_is_common_action(btcoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common.\n");
+ coex_dm->auto_tdma_adjust = false;
+ } else {
+ if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, "
+ "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
+ coex_dm->auto_tdma_adjust = false;
+ }
+ switch (coex_dm->cur_algorithm) {
+ case BT_8723B_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc8723b2ant_action_sco(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc8723b2ant_action_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP.\n");
+ btc8723b2ant_action_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = A2DP+PAN(HS).\n");
+ btc8723b2ant_action_a2dp_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR).\n");
+ btc8723b2ant_action_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HS mode.\n");
+ btc8723b2ant_action_pan_hs(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN+A2DP.\n");
+ btc8723b2ant_action_pan_edr_a2dp(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = PAN(EDR)+HID.\n");
+ btc8723b2ant_action_pan_edr_hid(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP+PAN.\n");
+ btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = HID+A2DP.\n");
+ btc8723b2ant_action_hid_a2dp(btcoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, "
+ "algorithm = coexist All Off!!\n");
+ btc8723b2ant_coex_alloff(btcoexist);
+ break;
+ }
+ coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+ }
+}
+
+
+
+/*********************************************************************
+ * workaround functions start with wa_btc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ * extern functions start with ex_halbtc8723b2ant_
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ u32 u32tmp = 0, fw_ver;
+ u8 u8tmp = 0;
+ u8 h2c_parameter[2] = {0};
+
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
+
+ /* backup rf 0x1e value */
+ coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist,
+ BTC_RF_A, 0x1e,
+ 0xfffff);
+
+ /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */
+ u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u32tmp &= ~BIT23;
+ u32tmp |= BIT24;
+ btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+ btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+ btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+ /* Antenna switch control parameter */
+ /* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/
+
+ /*Force GNT_BT to low*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+ btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+ /* 0x790[5:0]=0x5 */
+ u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+ u8tmp &= 0xc0;
+ u8tmp |= 0x5;
+ btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+
+ /*Antenna config */
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+	/* ext switch for fw ver < 0xc00 */
+ if (fw_ver < 0xc00) {
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x1);
+ /*Main Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+ 0x1);
+
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 1; /* ext switch type */
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+ 0x3, 0x2);
+ /*Aux Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+ 0x0);
+
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 1; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ } else {
+		/* ext switch always at s1 (if it exists) */
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+ /*Main Ant to BT for IPS case 0x4c[23]=1*/
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1);
+
+ if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /*tell firmware "no antenna inverse"*/
+ h2c_parameter[0] = 0;
+ h2c_parameter[1] = 0; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ } else {
+ /*tell firmware "antenna inverse"*/
+ h2c_parameter[0] = 1;
+ h2c_parameter[1] = 0; /*ext switch type*/
+ btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ h2c_parameter);
+ }
+ }
+
+ /* PTA parameter */
+ btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics */
+ /*0x76e[3] =1, WLAN_Act control by PTA*/
+ btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+ btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+ btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
+ btc8723b2ant_init_coex_dm(btcoexist);
+}
+
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+ u8 *cli_buf = btcoexist->cli_buf;
+ u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+ u32 u32tmp[4];
+ bool roam = false, scan = false;
+ bool link = false, wifi_under_5g = false;
+ bool bt_hs_on = false, wifi_busy = false;
+ s32 wifi_rssi = 0, bt_hs_rssi = 0;
+ u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+ u8 wifi_dot11_chnl, wifi_hs_chnl;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cli_buf);
+
+ if (btcoexist->manual_control) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========[Under Manual Control]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n ==========================================");
+ CL_PRINTF(cli_buf);
+ }
+
+ if (!board_info->bt_exist) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+ CL_PRINTF(cli_buf);
+ return;
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "Ant PG number/ Ant mechanism:",
+ board_info->pg_ant_num, board_info->btdm_ant_num);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+ "BT stack/ hci ext ver",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+ "CoexVer/ FwVer/ PatchVer",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+ &wifi_dot11_chnl);
+ btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+ "Dot11 channel / HsChnl(HsMode)",
+ wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+ "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+ coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+ btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "Wifi link/ roam/ scan", link, roam, scan);
+ CL_PRINTF(cli_buf);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ &wifi_traffic_dir);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+ "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+ (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+ ((!wifi_busy) ? "idle" :
+ ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+ "uplink" : "downlink")));
+ CL_PRINTF(cli_buf);
+
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+ "SCO/HID/PAN/A2DP",
+ bt_link_info->sco_exist, bt_link_info->hid_exist,
+ bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ bt_info_ext = coex_sta->bt_info_ext;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+ "BT Info A2DP rate",
+ (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+ CL_PRINTF(cli_buf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+ if (coex_sta->bt_info_c2h_cnt[i]) {
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x "
+ "%02x %02x %02x %02x(%d)",
+ glbt_info_src_8723b_2ant[i],
+ coex_sta->bt_info_c2h[i][0],
+ coex_sta->bt_info_c2h[i][1],
+ coex_sta->bt_info_c2h[i][2],
+ coex_sta->bt_info_c2h[i][3],
+ coex_sta->bt_info_c2h[i][4],
+ coex_sta->bt_info_c2h[i][5],
+ coex_sta->bt_info_c2h[i][6],
+ coex_sta->bt_info_c2h_cnt[i]);
+ CL_PRINTF(cli_buf);
+ }
+ }
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+ "PS state, IPS/LPS",
+ ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+ ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+ CL_PRINTF(cli_buf);
+ btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ /* Sw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============");
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+ "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+ coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+ coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+ coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+ CL_PRINTF(cli_buf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Fw mechanism]============");
+ CL_PRINTF(cli_buf);
+
+ ps_tdma_case = coex_dm->cur_ps_tdma;
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+ "PS TDMA", coex_dm->ps_tdma_para[0],
+ coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+ coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+ ps_tdma_case, coex_dm->auto_tdma_adjust);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+ "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+ coex_dm->cur_ignore_wlan_act);
+ CL_PRINTF(cli_buf);
+
+ /* Hw setting */
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+ "============[Hw setting]============");
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+ "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+ CL_PRINTF(cli_buf);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x778/0x880[29:25]", u8tmp[0],
+ (u32tmp[0]&0x3e000000) >> 25);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x948/ 0x67[5] / 0x765",
+ u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+ u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+ CL_PRINTF(cli_buf);
+
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+ u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+ ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+ ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+ "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+ u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+ u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
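+	/* sum the OFDM false-alarm counter fields and merge the two CCK FA byte counters */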
+ fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
+ ((u32tmp[1]&0xffff0000) >> 16) +
+ (u32tmp[1] & 0xffff) +
+ (u32tmp[2] & 0xffff) +
+ ((u32tmp[3]&0xffff0000) >> 16) +
+ (u32tmp[3] & 0xffff);
+ fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+ "OFDM-CCA/OFDM-FA/CCK-FA",
+ u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+ CL_PRINTF(cli_buf);
+
+ u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+ u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+ u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+ u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+ "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+ u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+ CL_PRINTF(cli_buf);
+
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x770(high-pri rx/tx)",
+ coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+ CL_PRINTF(cli_buf);
+ CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+ "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+ coex_sta->low_priority_tx);
+ CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+ btcoexist->btc_disp_dbg_msg(btcoexist,
+ BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
+ coex_sta->under_ips = true;
+ btc8723b2ant_coex_alloff(btcoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
+ coex_sta->under_ips = false;
+ }
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
+ coex_sta->under_lps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
+ coex_sta->under_lps = false;
+ }
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_SCAN_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
+ else if (BTC_SCAN_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
+ else if (BTC_ASSOCIATE_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
+}
+
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ u8 h2c_parameter[3] = {0};
+ u32 wifi_bw;
+ u8 wifi_central_chnl;
+
+ if (BTC_MEDIA_CONNECT == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
+
+	/* only for 2.4G do we need to inform BT of the channel mask */
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+ if ((BTC_MEDIA_CONNECT == type) &&
+ (wifi_central_chnl <= 14)) {
+ h2c_parameter[0] = 0x1;
+ h2c_parameter[1] = wifi_central_chnl;
+ btcoexist->btc_get(btcoexist,
+ BTC_GET_U4_WIFI_BW, &wifi_bw);
+ if (BTC_WIFI_BW_HT40 == wifi_bw)
+ h2c_parameter[2] = 0x30;
+ else
+ h2c_parameter[2] = 0x20;
+ }
+
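+	/* cache the channel info so it can be shown in the coex debug display */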
+ coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+ coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+ coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
+
+ btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (type == BTC_PACKET_DHCP)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
+}
+
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length)
+{
+ u8 bt_info = 0;
+ u8 i, rsp_source = 0;
+ bool bt_busy = false, limited_dig = false;
+ bool wifi_connected = false;
+
+ coex_sta->c2h_bt_info_req_sent = false;
+
+ rsp_source = tmpbuf[0]&0xf;
+ if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+ rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+ coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
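+	/* copy the raw C2H payload; byte 1 carries the BT status bits parsed below */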
+ for (i = 0; i < length; i++) {
+ coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+ if (i == 1)
+ bt_info = tmpbuf[i];
+ if (i == length-1)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
+ }
+
+ if (btcoexist->manual_control) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "return for Manual CTRL<===\n");
+ return;
+ }
+
+ if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+ coex_sta->bt_retry_cnt = /* [3:0]*/
+ coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+ coex_sta->bt_rssi =
+ coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+ coex_sta->bt_info_ext =
+ coex_sta->bt_info_c2h[rsp_source][4];
+
+		/* Here we need to resend some wifi info to BT
+		 * because BT was reset and lost the info.
+		 */
+ if ((coex_sta->bt_info_ext & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check,"
+ " send wifi BW&Chnl to BT!!\n");
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+ &wifi_connected);
+ if (wifi_connected)
+ btc8723b_med_stat_notify(btcoexist,
+ BTC_MEDIA_CONNECT);
+ else
+ btc8723b_med_stat_notify(btcoexist,
+ BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((coex_sta->bt_info_ext & BIT3)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, "
+ "set BT NOT to ignore Wlan active!!\n");
+ btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+ false);
+ } else {
+			/* BT already does not ignore WLAN active, do nothing here. */
+ }
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ if ((coex_sta->bt_info_ext & BIT4)) {
+ /* BT auto report already enabled, do nothing*/
+ } else {
+ btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+ true);
+ }
+#endif
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+ if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+ coex_sta->c2h_bt_inquiry_page = true;
+ else
+ coex_sta->c2h_bt_inquiry_page = false;
+
+ /* set link exist status*/
+ if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_sta->bt_link_exist = false;
+ coex_sta->pan_exist = false;
+ coex_sta->a2dp_exist = false;
+ coex_sta->hid_exist = false;
+ coex_sta->sco_exist = false;
+ } else { /* connection exists */
+ coex_sta->bt_link_exist = true;
+ if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
+ coex_sta->pan_exist = true;
+ else
+ coex_sta->pan_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_A2DP)
+ coex_sta->a2dp_exist = true;
+ else
+ coex_sta->a2dp_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_HID)
+ coex_sta->hid_exist = true;
+ else
+ coex_sta->hid_exist = false;
+ if (bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+ coex_sta->sco_exist = true;
+ else
+ coex_sta->sco_exist = false;
+ }
+
+ btc8723b2ant_update_bt_link_info(btcoexist);
+
+ if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Connected idle!!!\n");
+		/* connection exists but is not busy */
+ } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+ (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ } else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ } else {
+ coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), "
+ "BT Non-Defined state!!!\n");
+ }
+
+ if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+ (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+ bt_busy = true;
+ limited_dig = true;
+ } else {
+ bt_busy = false;
+ limited_dig = false;
+ }
+
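+	/* propagate the busy/limited-DIG state to the wifi side before re-running the coex mechanism */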
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+ coex_dm->limited_dig = limited_dig;
+ btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+ btc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type)
+{
+ if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex],StackOP Inquiry/page/pair start notify\n");
+ else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+ "[BTCoex],StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+ btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+ btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+ struct btc_board_info *board_info = &btcoexist->board_info;
+ struct btc_stack_info *stack_info = &btcoexist->stack_info;
+ static u8 dis_ver_info_cnt;
+ u32 fw_ver = 0, bt_patch_ver = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ "[BTCoex], =========================="
+ "Periodical===========================\n");
+
+ if (dis_ver_info_cnt <= 5) {
+ dis_ver_info_cnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], ****************************"
+ "************************************\n");
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ "
+ "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+ board_info->btdm_ant_num, board_info->btdm_ant_pos);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+ &bt_patch_ver);
+ btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = "
+ "%d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+ "[BTCoex], *****************************"
+ "***********************************\n");
+ }
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+ btc8723b2ant_query_bt_info(btcoexist);
+ btc8723b2ant_monitor_bt_ctr(btcoexist);
+ btc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+ if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
+ coex_dm->auto_tdma_adjust)
+ btc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
new file mode 100644
index 000000000000..e0ad8e545f82
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef _HAL8723B_2_ANT
+#define _HAL8723B_2_ANT
+
+/************************************************************************
+ * The following are the definitions for 8723B 2Ant BT coexistence
+ ************************************************************************/
+#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1
+
+#define BT_INFO_8723B_2ANT_B_FTP BIT7
+#define BT_INFO_8723B_2ANT_B_A2DP BIT6
+#define BT_INFO_8723B_2ANT_B_HID BIT5
+#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+
+enum BT_INFO_SRC_8723B_2ANT {
+ BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_2ANT_MAX
+};
+
+enum BT_8723B_2ANT_BT_STATUS {
+ BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_2ANT_BT_STATUS_MAX
+};
+
+enum BT_8723B_2ANT_COEX_ALGO {
+ BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
+};
+
+struct coex_dm_8723b_2ant {
+ /* fw mechanism */
+ bool pre_dec_bt_pwr;
+ bool cur_dec_bt_pwr;
+ u8 pre_fw_dac_swing_lvl;
+ u8 cur_fw_dac_swing_lvl;
+ bool cur_ignore_wlan_act;
+ bool pre_ignore_wlan_act;
+ u8 pre_ps_tdma;
+ u8 cur_ps_tdma;
+ u8 ps_tdma_para[5];
+ u8 tdma_adj_type;
+ bool reset_tdma_adjust;
+ bool auto_tdma_adjust;
+ bool pre_ps_tdma_on;
+ bool cur_ps_tdma_on;
+ bool pre_bt_auto_report;
+ bool cur_bt_auto_report;
+
+ /* sw mechanism */
+ bool pre_rf_rx_lpf_shrink;
+ bool cur_rf_rx_lpf_shrink;
+ u32 bt_rf0x1e_backup;
+ bool pre_low_penalty_ra;
+ bool cur_low_penalty_ra;
+ bool pre_dac_swing_on;
+ u32 pre_dac_swing_lvl;
+ bool cur_dac_swing_on;
+ u32 cur_dac_swing_lvl;
+ bool pre_adc_back_off;
+ bool cur_adc_back_off;
+ bool pre_agc_table_en;
+ bool cur_agc_table_en;
+ u32 pre_val0x6c0;
+ u32 cur_val0x6c0;
+ u32 pre_val0x6c4;
+ u32 cur_val0x6c4;
+ u32 pre_val0x6c8;
+ u32 cur_val0x6c8;
+ u8 pre_val0x6cc;
+ u8 cur_val0x6cc;
+ bool limited_dig;
+
+ /* algorithm related */
+ u8 pre_algorithm;
+ u8 cur_algorithm;
+ u8 bt_status;
+ u8 wifi_chnl_info[3];
+
+ bool need_recover_0x948;
+ u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ bool pan_exist;
+
+ bool under_lps;
+ bool under_ips;
+ u32 high_priority_tx;
+ u32 high_priority_rx;
+ u32 low_priority_tx;
+ u32 low_priority_rx;
+ u8 bt_rssi;
+ u8 pre_bt_rssi_state;
+ u8 pre_wifi_rssi_state[4];
+ bool c2h_bt_info_req_sent;
+ u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+ u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+ bool c2h_bt_inquiry_page;
+ u8 bt_retry_cnt;
+ u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is the interface that notifies the coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+ u8 type);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
new file mode 100644
index 000000000000..b6722de64a31
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -0,0 +1,1011 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/***********************************************
+ * Global variables
+ ***********************************************/
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+static u8 btc_dbg_buf[100];
+
+/***************************************************
+ * Debug related functions
+ ***************************************************/
+static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+ if (!btcoexist->binded || NULL == btcoexist->adapter)
+ return false;
+
+ return true;
+}
+
+static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->link_info.busytraffic)
+ return true;
+ else
+ return false;
+}
+
+static void halbtc_dbg_init(void)
+{
+ u8 i;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ btc_dbg_type[i] = 0;
+
+ btc_dbg_type[BTC_MSG_INTERFACE] =
+/* INTF_INIT | */
+/* INTF_NOTIFY | */
+ 0;
+
+ btc_dbg_type[BTC_MSG_ALGORITHM] =
+/* ALGO_BT_RSSI_STATE | */
+/* ALGO_WIFI_RSSI_STATE | */
+/* ALGO_BT_MONITOR | */
+/* ALGO_TRACE | */
+/* ALGO_TRACE_FW | */
+/* ALGO_TRACE_FW_DETAIL | */
+/* ALGO_TRACE_FW_EXEC | */
+/* ALGO_TRACE_SW | */
+/* ALGO_TRACE_SW_DETAIL | */
+/* ALGO_TRACE_SW_EXEC | */
+ 0;
+}
+
+static bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool is_ht40 = true;
+ enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+ if (bw == HT_CHANNEL_WIDTH_20)
+ is_ht40 = false;
+ else if (bw == HT_CHANNEL_WIDTH_20_40)
+ is_ht40 = true;
+
+ return is_ht40;
+}
+
+static bool halbtc_legacy(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ bool is_legacy = false;
+
+	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+ is_legacy = true;
+
+ return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+
+ if (rtlpriv->link_info.tx_busy_traffic)
+ return true;
+ else
+ return false;
+}
+
+static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv =
+ (struct rtl_priv *)btcoexist->adapter;
+ u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+ if (halbtc_is_bt40(rtlpriv)) {
+ wifi_bw = BTC_WIFI_BW_HT40;
+ } else {
+ if (halbtc_legacy(rtlpriv))
+ wifi_bw = BTC_WIFI_BW_LEGACY;
+ else
+ wifi_bw = BTC_WIFI_BW_HT20;
+ }
+ return wifi_bw;
+}
+
+static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 chnl = 1;
+
+ if (rtlphy->current_channel != 0)
+ chnl = rtlphy->current_channel;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "halbtc_get_wifi_central_chnl:%d\n", chnl);
+ return chnl;
+}
+
+static void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+		pr_info("halbtc_leave_lps()<--don't leave lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+ btcoexist->bt_info.bt_lps_on = false;
+}
+
+static void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv;
+ struct rtl_ps_ctl *ppsc;
+ bool ap_enable = false;
+
+ rtlpriv = btcoexist->adapter;
+ ppsc = rtl_psc(rtlpriv);
+
+ btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ &ap_enable);
+
+ if (ap_enable) {
+		pr_info("halbtc_enter_lps()<--don't enter lps under AP mode\n");
+ return;
+ }
+
+ btcoexist->bt_info.bt_ctrl_lps = true;
+	btcoexist->bt_info.bt_lps_on = true;
+}
+
+static void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+ if (btcoexist->bt_info.bt_ctrl_lps) {
+ btcoexist->bt_info.bt_lps_on = false;
+ btcoexist->bt_info.bt_ctrl_lps = false;
+ }
+}
+
+static void halbtc_leave_low_power(void)
+{
+}
+
+static void halbtc_nomal_low_power(void)
+{
+}
+
+static void halbtc_disable_low_power(void)
+{
+}
+
+static void halbtc_aggregation_check(void)
+{
+}
+
+static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+ return 0;
+}
+
+static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+ struct rtl_priv *rtlpriv = adapter;
+ s32 undec_sm_pwdb = 0;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ else /* associated entry pwdb */
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+ return undec_sm_pwdb;
+}
+
+static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool *bool_tmp = (bool *)out_buf;
+ int *s32_tmp = (int *)out_buf;
+ u32 *u32_tmp = (u32 *)out_buf;
+ u8 *u8_tmp = (u8 *)out_buf;
+ bool tmp = false;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (get_type) {
+ case BTC_GET_BL_HS_OPERATION:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_HS_CONNECTING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_CONNECTED:
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ tmp = true;
+ *bool_tmp = tmp;
+ break;
+ case BTC_GET_BL_WIFI_BUSY:
+ if (halbtc_is_wifi_busy(rtlpriv))
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_SCAN:
+ if (mac->act_scanning)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_LINK:
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ROAM: /*TODO*/
+ if (mac->link_state == MAC80211_LINKING)
+ *bool_tmp = true;
+ else
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/
+ *bool_tmp = false;
+
+ break;
+ case BTC_GET_BL_WIFI_UNDER_5G:
+ *bool_tmp = false; /*TODO*/
+		break;
+
+ case BTC_GET_BL_WIFI_DHCP: /*TODO*/
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+ if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+ *bool_tmp = false;
+ else
+ *bool_tmp = true;
+ break;
+ case BTC_GET_BL_WIFI_UNDER_B_MODE:
+ *bool_tmp = false; /*TODO*/
+ break;
+ case BTC_GET_BL_EXT_SWITCH:
+ *bool_tmp = false;
+ break;
+ case BTC_GET_S4_WIFI_RSSI:
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_S4_HS_RSSI: /*TODO*/
+ *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+ break;
+ case BTC_GET_U4_WIFI_BW:
+ *u32_tmp = halbtc_get_wifi_bw(btcoexist);
+ break;
+ case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+ if (halbtc_is_wifi_uplink(rtlpriv))
+ *u32_tmp = BTC_WIFI_TRAFFIC_TX;
+ else
+ *u32_tmp = BTC_WIFI_TRAFFIC_RX;
+ break;
+ case BTC_GET_U4_WIFI_FW_VER:
+ *u32_tmp = rtlhal->fw_version;
+ break;
+ case BTC_GET_U4_BT_PATCH_VER:
+ *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_DOT11_CHNL:
+ *u8_tmp = rtlphy->current_channel;
+ break;
+ case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+ *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+ break;
+ case BTC_GET_U1_WIFI_HS_CHNL:
+ *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/
+ break;
+ case BTC_GET_U1_MAC_PHY_MODE:
+ *u8_tmp = BTC_MP_UNKNOWN;
+ break;
+
+ /************* 1Ant **************/
+ case BTC_GET_U1_LPS_MODE:
+ *u8_tmp = btcoexist->pwr_mode_val[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+ bool *bool_tmp = (bool *)in_buf;
+ u8 *u8_tmp = (u8 *)in_buf;
+ u32 *u32_tmp = (u32 *)in_buf;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return false;
+
+ switch (set_type) {
+ /* set some bool type variables. */
+ case BTC_SET_BL_BT_DISABLE:
+ btcoexist->bt_info.bt_disabled = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_TRAFFIC_BUSY:
+ btcoexist->bt_info.bt_busy = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_LIMITED_DIG:
+ btcoexist->bt_info.limited_dig = *bool_tmp;
+ break;
+ case BTC_SET_BL_FORCE_TO_ROAM:
+ btcoexist->bt_info.force_to_roam = *bool_tmp;
+ break;
+ case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+ btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+ break;
+ case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+ btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+ break;
+ case BTC_SET_BL_INC_SCAN_DEV_NUM:
+ btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+ break;
+ /* set some u1Byte type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+ btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+ break;
+ case BTC_SET_U1_AGG_BUF_SIZE:
+ btcoexist->bt_info.agg_buf_size = *u8_tmp;
+ break;
+	/* the following are actions which will be triggered */
+ case BTC_SET_ACT_GET_BT_RSSI:
+ /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+ break;
+ case BTC_SET_ACT_AGGREGATE_CTRL:
+ halbtc_aggregation_check();
+ break;
+
+ /* 1Ant */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+ btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+ break;
+ case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+ /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */
+ break;
+ case BTC_SET_U1_1ANT_LPS:
+ btcoexist->bt_info.lps_1ant = *u8_tmp;
+ break;
+ case BTC_SET_U1_1ANT_RPWM:
+ btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+ break;
+	/* the following are actions which will be triggered */
+ case BTC_SET_ACT_LEAVE_LPS:
+ halbtc_leave_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_ENTER_LPS:
+ halbtc_enter_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_NORMAL_LPS:
+ halbtc_normal_lps(btcoexist);
+ break;
+ case BTC_SET_ACT_DISABLE_LOW_POWER:
+ halbtc_disable_low_power();
+ break;
+ case BTC_SET_ACT_UPDATE_ra_mask:
+ btcoexist->bt_info.ra_mask = *u32_tmp;
+ break;
+ case BTC_SET_ACT_SEND_MIMO_PS:
+ break;
+ case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+ btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+ break;
+ case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+ break;
+ case BTC_SET_ACT_CTRL_BT_COEX:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ * IO related functions
+ ************************************************************/
+static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_byte(rtlpriv, reg_addr);
+}
+
+static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_word(rtlpriv, reg_addr);
+}
+
+static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_read_dword(rtlpriv, reg_addr);
+}
+
+static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+ u32 bit_mask, u8 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ u8 original_value, bit_shift = 0;
+ u8 i;
+
+ if (bit_mask != MASKDWORD) {/*if not "double word" write*/
+ original_value = rtl_read_byte(rtlpriv, reg_addr);
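+		/* find the lowest set bit of the mask to get the shift for the data */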
+ for (i = 0; i <= 7; i++) {
+ if ((bit_mask>>i) & 0x1)
+ break;
+ }
+ bit_shift = i;
+ data = (original_value & (~bit_mask)) |
+ ((data << bit_shift) & bit_mask);
+ }
+ rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+ struct btc_coexist *btcoexist =
+ (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
+ u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buf)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
+ cmd_len, cmd_buf);
+}
+
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+ struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+ switch (disp_type) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ halbtc_display_coex_statistics(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ halbtc_display_bt_link_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_BT_FW_VER:
+ halbtc_display_bt_fw_info(btcoexist);
+ break;
+ case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+ halbtc_display_fw_pwr_mode_cmd(btcoexist);
+ break;
+ default:
+ break;
+ }
+}
+
+/*****************************************************************
+ * Extern functions called by other modules
+ *****************************************************************/
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ btcoexist->statistics.cnt_bind++;
+
+ halbtc_dbg_init();
+
+ if (btcoexist->binded)
+ return false;
+ else
+ btcoexist->binded = true;
+
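+	/* select the bus interface type according to the build configuration */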
+#if (defined(CONFIG_PCI_HCI))
+ btcoexist->chip_interface = BTC_INTF_PCI;
+#elif (defined(CONFIG_USB_HCI))
+ btcoexist->chip_interface = BTC_INTF_USB;
+#elif (defined(CONFIG_SDIO_HCI))
+ btcoexist->chip_interface = BTC_INTF_SDIO;
+#elif (defined(CONFIG_GSPI_HCI))
+ btcoexist->chip_interface = BTC_INTF_GSPI;
+#else
+ btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+#endif
+
+ if (NULL == btcoexist->adapter)
+ btcoexist->adapter = adapter;
+
+ btcoexist->stack_info.profile_notified = false;
+
+ btcoexist->btc_read_1byte = halbtc_read_1byte;
+ btcoexist->btc_write_1byte = halbtc_write_1byte;
+ btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+ btcoexist->btc_read_2byte = halbtc_read_2byte;
+ btcoexist->btc_write_2byte = halbtc_write_2byte;
+ btcoexist->btc_read_4byte = halbtc_read_4byte;
+ btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+ btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+ btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+ btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+ btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+ btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+ btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+
+ btcoexist->btc_get = halbtc_get;
+ btcoexist->btc_set = halbtc_set;
+
+ btcoexist->cli_buf = &btc_dbg_buf[0];
+
+ btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+ btcoexist->bt_info.agg_buf_size = 5;
+
+ btcoexist->bt_info.increase_scan_dev_num = false;
+ return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_hw_config++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->statistics.cnt_init_coex_dm++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+
+ btcoexist->initilized = true;
+}
+
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 ips_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_ips_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (ERFOFF == type)
+ ips_type = BTC_IPS_ENTER;
+ else
+ ips_type = BTC_IPS_LEAVE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 lps_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_lps_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (EACTIVE == type)
+ lps_type = BTC_LPS_DISABLE;
+ else
+ lps_type = BTC_LPS_ENABLE;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 scan_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_scan_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (type)
+ scan_type = BTC_SCAN_START;
+ else
+ scan_type = BTC_SCAN_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 asso_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_connect_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (action)
+ asso_type = BTC_ASSOCIATE_START;
+ else
+ asso_type = BTC_ASSOCIATE_FINISH;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum _RT_MEDIA_STATUS media_status)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 status;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_media_status_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ if (RT_MEDIA_CONNECT == media_status)
+ status = BTC_MEDIA_CONNECT;
+ else
+ status = BTC_MEDIA_DISCONNECT;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ btc8723b_med_stat_notify(btcoexist, status);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 packet_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_special_packet_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ packet_type = BTC_PACKET_DHCP;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+ packet_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+ u8 *tmp_buf, u8 length)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_bt_info_notify++;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 stack_op_type;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_stack_operation_notify++;
+ if (btcoexist->manual_control)
+ return;
+
+ stack_op_type = BTC_STACK_OP_NONE;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_stack_operation_notify(btcoexist,
+ stack_op_type);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_halt_notify(btcoexist);
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_periodical++;
+
+ halbtc_leave_low_power();
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_periodical(btcoexist);
+
+ halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+ u8 code, u8 len, u8 *data)
+{
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+ btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+ struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+ btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+ gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+ switch (chip_type) {
+ default:
+ case BT_2WIRE:
+ case BT_ISSC_3WIRE:
+ case BT_ACCEL:
+ case BT_RTL8756:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+ break;
+ case BT_CSR_BC4:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+ break;
+ case BT_CSR_BC8:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+ break;
+ case BT_RTL8723A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+ break;
+ case BT_RTL8821A:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+ break;
+ case BT_RTL8723B:
+ gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+ break;
+ }
+}
+
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+ if (BT_COEX_ANT_TYPE_PG == type) {
+ gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+ }
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+ struct rtl_priv *rtlpriv = btcoexist->adapter;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ if (!halbtc_is_bt_coexist_available(btcoexist))
+ return;
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+ ex_halbtc8723b2ant_display_coex_info(btcoexist);
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
new file mode 100644
index 000000000000..871fc3c6d559
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -0,0 +1,559 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include "../wifi.h"
+
+#define NORMAL_EXEC false
+#define FORCE_EXEC true
+
+#define BTC_RF_A RF90_PATH_A
+#define BTC_RF_B RF90_PATH_B
+#define BTC_RF_C RF90_PATH_C
+#define BTC_RF_D RF90_PATH_D
+
+#define BTC_SMSP SINGLEMAC_SINGLEPHY
+#define BTC_DMDP DUALMAC_DUALPHY
+#define BTC_DMSP DUALMAC_SINGLEPHY
+#define BTC_MP_UNKNOWN 0xff
+
+#define IN
+#define OUT
+
+#define BT_TMP_BUF_SIZE 100
+
+#define BT_COEX_ANT_TYPE_PG 0
+#define BT_COEX_ANT_TYPE_ANTDIV 1
+#define BT_COEX_ANT_TYPE_DETECTED 2
+
+#define BTC_MIMO_PS_STATIC 0
+#define BTC_MIMO_PS_DYNAMIC 1
+
+#define BTC_RATE_DISABLE 0
+#define BTC_RATE_ENABLE 1
+
+#define BTC_ANT_PATH_WIFI 0
+#define BTC_ANT_PATH_BT 1
+#define BTC_ANT_PATH_PTA 2
+
+enum btc_chip_interface {
+ BTC_INTF_UNKNOWN = 0,
+ BTC_INTF_PCI = 1,
+ BTC_INTF_USB = 2,
+ BTC_INTF_SDIO = 3,
+ BTC_INTF_GSPI = 4,
+ BTC_INTF_MAX
+};
+
+enum BTC_CHIP_TYPE {
+ BTC_CHIP_UNDEF = 0,
+ BTC_CHIP_CSR_BC4 = 1,
+ BTC_CHIP_CSR_BC8 = 2,
+ BTC_CHIP_RTL8723A = 3,
+ BTC_CHIP_RTL8821 = 4,
+ BTC_CHIP_RTL8723B = 5,
+ BTC_CHIP_MAX
+};
+
+enum BTC_MSG_TYPE {
+ BTC_MSG_INTERFACE = 0x0,
+ BTC_MSG_ALGORITHM = 0x1,
+ BTC_MSG_MAX
+};
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT BIT0
+#define INTF_NOTIFY BIT2
+
+/* following is for BTC_MSG_ALGORITHM */
+#define ALGO_BT_RSSI_STATE BIT0
+#define ALGO_WIFI_RSSI_STATE BIT1
+#define ALGO_BT_MONITOR BIT2
+#define ALGO_TRACE BIT3
+#define ALGO_TRACE_FW BIT4
+#define ALGO_TRACE_FW_DETAIL BIT5
+#define ALGO_TRACE_FW_EXEC BIT6
+#define ALGO_TRACE_SW BIT7
+#define ALGO_TRACE_SW_DETAIL BIT8
+#define ALGO_TRACE_SW_EXEC BIT9
+
+
+#define CL_SPRINTF snprintf
+#define CL_PRINTF printk
+
+#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+ pr_info("%s: ", __func__); \
+ printk(printstr, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_ptr; \
+ printk printstr; \
+ for (__i = 0; __i < 6; __i++) \
+ printk("%02X%s", __ptr[__i], (__i == 5) ? \
+ "" : "-"); \
+ pr_info("\n"); \
+ } \
+ } while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+ do { \
+ if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)_hexdata; \
+ printk(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
+ == 0) ? " " : " ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ pr_debug("\n"); \
+ } \
+ } while (0)
+
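+/* Illustrative use of the trace macros above (wifi_rssi_state here is a
+ * hypothetical variable, not one defined in this patch): a message is
+ * printed only when the matching flag bit is set in btc_dbg_type[] for
+ * its message class, e.g.
+ *
+ *	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+ *		  "[BTCoex], wifi_rssi_state = %d\n", wifi_rssi_state);
+ */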
+
+enum btc_power_save_type {
+ BTC_PS_WIFI_NATIVE = 0,
+ BTC_PS_LPS_ON = 1,
+ BTC_PS_LPS_OFF = 2,
+ BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+ /* The following is some board information */
+ u8 bt_chip_type;
+ u8 pg_ant_num; /* pg ant number */
+ u8 btdm_ant_num; /* ant number for btdm */
+ u8 btdm_ant_pos;
+ bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+ BTC_DBG_SET_COEX_NORMAL = 0x0,
+ BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+ BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+ BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+ BTC_RSSI_STATE_HIGH = 0x0,
+ BTC_RSSI_STATE_MEDIUM = 0x1,
+ BTC_RSSI_STATE_LOW = 0x2,
+ BTC_RSSI_STATE_STAY_HIGH = 0x3,
+ BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+ BTC_RSSI_STATE_STAY_LOW = 0x5,
+ BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+ BTC_ROLE_STATION = 0x0,
+ BTC_ROLE_AP = 0x1,
+ BTC_ROLE_IBSS = 0x2,
+ BTC_ROLE_HS_MODE = 0x3,
+ BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+ BTC_WIFI_BW_LEGACY = 0x0,
+ BTC_WIFI_BW_HT20 = 0x1,
+ BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+ BTC_WIFI_TRAFFIC_TX = 0x0,
+ BTC_WIFI_TRAFFIC_RX = 0x1,
+ BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+ BTC_WIFI_PNP_WAKE_UP = 0x0,
+ BTC_WIFI_PNP_SLEEP = 0x1,
+ BTC_WIFI_PNP_MAX
+};
+
+enum btc_get_type {
+ /* type bool */
+ BTC_GET_BL_HS_OPERATION,
+ BTC_GET_BL_HS_CONNECTING,
+ BTC_GET_BL_WIFI_CONNECTED,
+ BTC_GET_BL_WIFI_BUSY,
+ BTC_GET_BL_WIFI_SCAN,
+ BTC_GET_BL_WIFI_LINK,
+ BTC_GET_BL_WIFI_DHCP,
+ BTC_GET_BL_WIFI_SOFTAP_IDLE,
+ BTC_GET_BL_WIFI_SOFTAP_LINKING,
+ BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+ BTC_GET_BL_WIFI_ROAM,
+ BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ BTC_GET_BL_WIFI_UNDER_5G,
+ BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ BTC_GET_BL_EXT_SWITCH,
+
+ /* type s4Byte */
+ BTC_GET_S4_WIFI_RSSI,
+ BTC_GET_S4_HS_RSSI,
+
+ /* type u32 */
+ BTC_GET_U4_WIFI_BW,
+ BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ BTC_GET_U4_WIFI_FW_VER,
+ BTC_GET_U4_BT_PATCH_VER,
+
+ /* type u1Byte */
+ BTC_GET_U1_WIFI_DOT11_CHNL,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ BTC_GET_U1_WIFI_HS_CHNL,
+ BTC_GET_U1_MAC_PHY_MODE,
+
+ /* for 1Ant */
+ BTC_GET_U1_LPS_MODE,
+ BTC_GET_BL_BT_SCO_BUSY,
+
+ /* for test mode */
+ BTC_GET_DRIVER_TEST_CFG,
+ BTC_GET_MAX
+};
+
+enum btc_set_type {
+ /* type bool */
+ BTC_SET_BL_BT_DISABLE,
+ BTC_SET_BL_BT_TRAFFIC_BUSY,
+ BTC_SET_BL_BT_LIMITED_DIG,
+ BTC_SET_BL_FORCE_TO_ROAM,
+ BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+ /* type u1Byte */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ BTC_SET_UI_SCAN_SIG_COMPENSATION,
+ BTC_SET_U1_AGG_BUF_SIZE,
+
+ /* type trigger some action */
+ BTC_SET_ACT_GET_BT_RSSI,
+ BTC_SET_ACT_AGGREGATE_CTRL,
+
+ /********* for 1Ant **********/
+ /* type bool */
+ BTC_SET_BL_BT_SCO_BUSY,
+ /* type u1Byte */
+ BTC_SET_U1_1ANT_LPS,
+ BTC_SET_U1_1ANT_RPWM,
+ /* type trigger some action */
+ BTC_SET_ACT_LEAVE_LPS,
+ BTC_SET_ACT_ENTER_LPS,
+ BTC_SET_ACT_NORMAL_LPS,
+ BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ BTC_SET_ACT_UPDATE_ra_mask,
+ BTC_SET_ACT_SEND_MIMO_PS,
+ /* BT Coex related */
+ BTC_SET_ACT_CTRL_BT_INFO,
+ BTC_SET_ACT_CTRL_BT_COEX,
+ /***************************/
+ BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+ BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+ BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+ BTC_DBG_DISP_BT_FW_VER = 0x2,
+ BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+ BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+ BTC_IPS_LEAVE = 0x0,
+ BTC_IPS_ENTER = 0x1,
+ BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+ BTC_LPS_DISABLE = 0x0,
+ BTC_LPS_ENABLE = 0x1,
+ BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+ BTC_SCAN_FINISH = 0x0,
+ BTC_SCAN_START = 0x1,
+ BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+ BTC_ASSOCIATE_FINISH = 0x0,
+ BTC_ASSOCIATE_START = 0x1,
+ BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+ BTC_MEDIA_DISCONNECT = 0x0,
+ BTC_MEDIA_CONNECT = 0x1,
+ BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+ BTC_PACKET_UNKNOWN = 0x0,
+ BTC_PACKET_DHCP = 0x1,
+ BTC_PACKET_ARP = 0x2,
+ BTC_PACKET_EAPOL = 0x3,
+ BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+ BTC_STACK_OP_NONE = 0x0,
+ BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+ BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+ BTC_STACK_OP_MAX
+};
+
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+ u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+ u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+ u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+ u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+ u32 cmd_len, u8 *cmd_buffer);
+
+typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
+struct btc_bt_info {
+ bool bt_disabled;
+ u8 rssi_adjust_for_agc_table_on;
+ u8 rssi_adjust_for_1ant_coex_type;
+ bool bt_busy;
+ u8 agg_buf_size;
+ bool limited_dig;
+ bool reject_agg_pkt;
+ bool b_bt_ctrl_buf_size;
+ bool increase_scan_dev_num;
+ u16 bt_hci_ver;
+ u16 bt_real_fw_ver;
+ u8 bt_fw_ver;
+
+ /* the following is for 1Ant solution */
+ bool bt_ctrl_lps;
+ bool bt_pwr_save_mode;
+ bool bt_lps_on;
+ bool force_to_roam;
+ u8 force_exec_pwr_cmd_cnt;
+ u8 lps_1ant;
+ u8 rpwm_1ant;
+ u32 ra_mask;
+};
+
+struct btc_stack_info {
+ bool profile_notified;
+ u16 hci_version; /* stack hci version */
+ u8 num_of_link;
+ bool bt_link_exist;
+ bool sco_exist;
+ bool acl_exist;
+ bool a2dp_exist;
+ bool hid_exist;
+ u8 num_of_hid;
+ bool pan_exist;
+ bool unknown_acl_exist;
+ char min_bt_rssi;
+};
+
+struct btc_statistics {
+ u32 cnt_bind;
+ u32 cnt_init_hw_config;
+ u32 cnt_init_coex_dm;
+ u32 cnt_ips_notify;
+ u32 cnt_lps_notify;
+ u32 cnt_scan_notify;
+ u32 cnt_connect_notify;
+ u32 cnt_media_status_notify;
+ u32 cnt_special_packet_notify;
+ u32 cnt_bt_info_notify;
+ u32 cnt_periodical;
+ u32 cnt_stack_operation_notify;
+ u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+ bool bt_link_exist;
+ bool sco_exist;
+ bool sco_only;
+ bool a2dp_exist;
+ bool a2dp_only;
+ bool hid_exist;
+ bool hid_only;
+ bool pan_exist;
+ bool pan_only;
+};
+
+enum btc_antenna_pos {
+ BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+ BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
+struct btc_coexist {
+ /* make sure only one adapter can bind the data context */
+ bool binded;
+ /* default adapter */
+ void *adapter;
+ struct btc_board_info board_info;
+ /* some bt info referenced by non-bt module */
+ struct btc_bt_info bt_info;
+ struct btc_stack_info stack_info;
+ enum btc_chip_interface chip_interface;
+ struct btc_bt_link_info bt_link_info;
+
+ bool initilized;
+ bool stop_coex_dm;
+ bool manual_control;
+ u8 *cli_buf;
+ struct btc_statistics statistics;
+ u8 pwr_mode_val[10];
+
+ /* function pointers - io related */
+ bfp_btc_r1 btc_read_1byte;
+ bfp_btc_w1 btc_write_1byte;
+ bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+ bfp_btc_r2 btc_read_2byte;
+ bfp_btc_w2 btc_write_2byte;
+ bfp_btc_r4 btc_read_4byte;
+ bfp_btc_w4 btc_write_4byte;
+
+ bfp_btc_set_bb_reg btc_set_bb_reg;
+ bfp_btc_get_bb_reg btc_get_bb_reg;
+
+ bfp_btc_set_rf_reg btc_set_rf_reg;
+ bfp_btc_get_rf_reg btc_get_rf_reg;
+
+ bfp_btc_fill_h2c btc_fill_h2c;
+
+ bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+ bfp_btc_get btc_get;
+ bfp_btc_set btc_set;
+};
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+ enum _RT_MEDIA_STATUS media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+ u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+ u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
new file mode 100644
index 000000000000..0ab94fe4cbbe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
@@ -0,0 +1,218 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2013 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static struct rtl_btc_ops rtl_btc_operation = {
+ .btc_init_variables = rtl_btc_init_variables,
+ .btc_init_hal_vars = rtl_btc_init_hal_vars,
+ .btc_init_hw_config = rtl_btc_init_hw_config,
+ .btc_ips_notify = rtl_btc_ips_notify,
+ .btc_scan_notify = rtl_btc_scan_notify,
+ .btc_connect_notify = rtl_btc_connect_notify,
+ .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+ .btc_periodical = rtl_btc_periodical,
+ .btc_halt_notify = rtl_btc_halt_notify,
+ .btc_btinfo_notify = rtl_btc_btinfo_notify,
+ .btc_is_limited_dig = rtl_btc_is_limited_dig,
+ .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+ .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+ u8 ant_num;
+ u8 bt_exist;
+ u8 bt_type;
+
+ ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s, antNum is %d\n", __func__, ant_num);
+
+ bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s, bt_exist is %d\n", __func__, bt_exist);
+ exhalbtc_set_bt_exist(bt_exist);
+
+ bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n",
+ __func__, bt_type);
+ exhalbtc_set_chip_type(bt_type);
+
+ exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_init_hw_config(&gl_bt_coexist);
+ exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+ exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+ exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+ exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus)
+{
+ exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+ exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+ exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+ exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+ return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+ bool bt_change_edca = false;
+ u32 cur_edca_val;
+ u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+ u32 edca_hs;
+ u32 edca_addr = 0x504;
+
+ cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+ if (halbtc_is_wifi_uplink(rtlpriv)) {
+ if (cur_edca_val != edca_bt_hs_uplink) {
+ edca_hs = edca_bt_hs_uplink;
+ bt_change_edca = true;
+ }
+ } else {
+ if (cur_edca_val != edca_bt_hs_downlink) {
+ edca_hs = edca_bt_hs_downlink;
+ bt_change_edca = true;
+ }
+ }
+
+ if (bt_change_edca)
+ rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+ return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+	return gl_bt_coexist.bt_info.bt_disabled;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+ return &rtl_btc_operation;
+}
+EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
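+
+/* Typical consumption of this ops table (a minimal sketch; the exact
+ * field layout on the core-driver side is assumed, not shown here):
+ *
+ *	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+ *	rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
+ *	rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
+ */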
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+ u8 num;
+
+ if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+ num = 2;
+ else
+ num = 1;
+
+ return num;
+}
+
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT;
+
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+ if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ m_status = RT_MEDIA_CONNECT;
+
+ return m_status;
+}
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+ return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+ return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+ return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
new file mode 100644
index 000000000000..805b22cc8fc8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 2d337a0c3df0..4ec424f26672 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -36,6 +36,66 @@
#include <linux/export.h>
+void rtl_addr_delay(u32 addr)
+{
+ if (addr == 0xfe)
+ mdelay(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ udelay(50);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+}
+EXPORT_SYMBOL(rtl_addr_delay);
+
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+ u32 mask, u32 data)
+{
+ if (addr == 0xfe) {
+ mdelay(50);
+ } else if (addr == 0xfd) {
+ mdelay(5);
+ } else if (addr == 0xfc) {
+ mdelay(1);
+ } else if (addr == 0xfb) {
+ udelay(50);
+ } else if (addr == 0xfa) {
+ udelay(5);
+ } else if (addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_rfreg(hw, rfpath, addr, mask, data);
+ udelay(1);
+ }
+}
+EXPORT_SYMBOL(rtl_rfreg_delay);
+
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
+{
+ if (addr == 0xfe) {
+ mdelay(50);
+ } else if (addr == 0xfd) {
+ mdelay(5);
+ } else if (addr == 0xfc) {
+ mdelay(1);
+ } else if (addr == 0xfb) {
+ udelay(50);
+ } else if (addr == 0xfa) {
+ udelay(5);
+ } else if (addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_bbreg(hw, addr, MASKDWORD, data);
+ udelay(1);
+ }
+}
+EXPORT_SYMBOL(rtl_bb_delay);
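+
+/* The three helpers above treat the pseudo-addresses 0xfe..0xf9 found in
+ * the PHY/RF init tables as fixed delays (50/5/1 ms and 50/5/1 us) and
+ * write any other address normally.  A sketch with made-up table values:
+ *
+ *	rtl_bb_delay(hw, 0xfe, 0);            delay entry: sleep 50 ms
+ *	rtl_bb_delay(hw, 0x800, 0x80040000);  ordinary BB register write
+ */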
+
void rtl_fw_cb(const struct firmware *firmware, void *context)
{
struct ieee80211_hw *hw = context;
@@ -475,20 +535,40 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 rx_conf;
*new_flags &= RTL_SUPPORTED_FILTERS;
if (!changed_flags)
return;
+	/* If the SSID has not been set to the hw, don't check the bssid here;
+	 * this path is only used for scanning while linked.  For both the
+	 * linked and not-linked cases, check_bssid is set in set_network_type. */
+ if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+ (mac->link_state >= MAC80211_LINKED)) {
+ if (mac->opmode != NL80211_IFTYPE_AP &&
+ mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+ } else {
+ rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+ }
+ }
+ }
+
+ /* must be called after set_chk_bssid since that function modifies the
+ * RCR register too. */
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
+
/*TODO: we disable broadcase now, so enable here */
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive multicast frame\n");
} else {
- mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive multicast frame\n");
@@ -497,39 +577,25 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive FCS error frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive FCS error frame\n");
}
}
- /* if ssid not set to hw don't check bssid
- * here just used for linked scanning, & linked
- * and nolink check bssid is set in set network_type */
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
- if (mac->opmode != NL80211_IFTYPE_AP &&
- mac->opmode != NL80211_IFTYPE_MESH_POINT) {
- if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
- rtlpriv->cfg->ops->set_chk_bssid(hw, false);
- } else {
- rtlpriv->cfg->ops->set_chk_bssid(hw, true);
- }
- }
- }
if (changed_flags & FIF_CONTROL) {
if (*new_flags & FIF_CONTROL) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive control frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive control frame\n");
}
@@ -537,15 +603,17 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
- mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+ rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Enable receive other BSS's frame\n");
} else {
- mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+ rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
"Disable receive other BSS's frame\n");
}
}
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
}
static int rtl_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -738,6 +806,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->linked_set_reg(hw);
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
+ if (!sta) {
+ pr_err("ieee80211_find_sta returned NULL\n");
+ rcu_read_unlock();
+ goto out;
+ }
if (vif->type == NL80211_IFTYPE_STATION && sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
@@ -892,7 +965,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->basic_rates = basic_rates;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
- (u8 *) (&basic_rates));
+ (u8 *)(&basic_rates));
}
rcu_read_unlock();
}
@@ -906,6 +979,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->assoc) {
if (ppsc->fwctrl_lps) {
u8 mstatus = RT_MEDIA_CONNECT;
+ u8 keep_alive = 10;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_KEEP_ALIVE,
+ &keep_alive);
+
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
&mstatus);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 2fe46a1b4f1f..027e75374dcc 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -41,5 +41,9 @@
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_addr_delay(u32 addr);
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+ u32 mask, u32 data);
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index d7aa165fe677..dae55257f0e8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -811,19 +811,19 @@ done:
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return;
tmp_one = 1;
- rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->rxbuffersize);
if (index == rtlpci->rxringcount - 1)
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXERO,
&tmp_one);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN,
&tmp_one);
index = (index + 1) % rtlpci->rxringcount;
@@ -983,6 +983,8 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
struct sk_buff *pskb = NULL;
struct rtl_tx_desc *pdesc = NULL;
struct rtl_tcb_desc tcb_desc;
+ /*This is for new trx flow*/
+ struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
u8 temp_one = 1;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
@@ -1004,11 +1006,12 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
info = IEEE80211_SKB_CB(pskb);
pdesc = &ring->desc[0];
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
+ (u8 *)pbuffer_desc, info, NULL, pskb,
+ BEACON_QUEUE, &tcb_desc);
__skb_queue_tail(&ring->queue, pskb);
- rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
&temp_one);
return;
@@ -1066,7 +1069,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
mac->current_ampdu_factor = 3;
/*QOS*/
- rtlpci->acm_method = eAcmWay2_SW;
+ rtlpci->acm_method = EACMWAY2_SW;
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
@@ -1113,7 +1116,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
((i + 1) % entries) *
sizeof(*ring);
- rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]),
true, HW_DESC_TX_NEXTDESC_ADDR,
(u8 *)&nextdescaddress);
}
@@ -1188,19 +1191,19 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
dev_kfree_skb_any(skb);
return 1;
}
- rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->
rxbuffersize);
- rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXOWN,
&tmp_one);
}
- rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXERO, &tmp_one);
}
return 0;
@@ -1331,7 +1334,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
- rtlpriv->cfg->ops->set_desc((u8 *) entry,
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
false,
HW_DESC_RXOWN,
&tmp_one);
@@ -1424,6 +1427,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
+ struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
u8 idx;
u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
unsigned long flags;
@@ -1464,17 +1468,22 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
idx = 0;
pdesc = &ring->desc[idx];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
- true, HW_DESC_OWN);
+ if (rtlpriv->use_new_trx_flow) {
+ ptx_bd_desc = &ring->buffer_desc[idx];
+ } else {
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
+ true, HW_DESC_OWN);
- if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return skb->len;
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ return skb->len;
+ }
}
if (ieee80211_is_data_qos(fc)) {
@@ -1494,17 +1503,20 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
- info, sta, skb, hw_queue, ptcb_desc);
+ (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
__skb_queue_tail(&ring->queue, skb);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
- HW_DESC_OWN, &temp_one);
-
+ if (rtlpriv->use_new_trx_flow) {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+ HW_DESC_OWN, &hw_queue);
+ } else {
+ rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+ HW_DESC_OWN, &temp_one);
+ }
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
-
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
hw_queue, ring->idx, idx,
@@ -1841,6 +1853,65 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
return true;
}
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = pci_enable_msi(rtlpci->pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0) {
+ pci_disable_msi(rtlpci->pdev);
+ return ret;
+ }
+
+ rtlpci->using_msi = true;
+
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
+ return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (ret < 0)
+ return ret;
+
+ rtlpci->using_msi = false;
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
+ return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ int ret;
+
+ if (rtlpci->msi_support) {
+ ret = rtl_pci_intr_mode_msi(hw);
+ if (ret < 0)
+ ret = rtl_pci_intr_mode_legacy(hw);
+ } else {
+ ret = rtl_pci_intr_mode_legacy(hw);
+ }
+ return ret;
+}
+
int rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1983,8 +2054,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
rtlpci = rtl_pcidev(pcipriv);
- err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
- IRQF_SHARED, KBUILD_MODNAME, hw);
+ err = rtl_pci_intr_mode_decide(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"%s: failed to register IRQ handler\n",
@@ -2052,6 +2122,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtlpci->irq_alloc = 0;
}
+ if (rtlpci->using_msi)
+ pci_disable_msi(rtlpci->pdev);
+
list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d3262ec45d23..90174a814a6d 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -137,12 +137,22 @@ struct rtl_tx_cmd_desc {
u32 dword[16];
} __packed;
+/* In the new TRX flow, the buffer descriptor is a new concept;
+ * the TX wifi info corresponds to the TX descriptor of the old flow,
+ * and the RX wifi info corresponds to the RX descriptor of the old flow.
+ */
+struct rtl_tx_buffer_desc {
+ u32 dword[8]; /*seg = 4*/
+} __packed;
+
struct rtl8192_tx_ring {
struct rtl_tx_desc *desc;
dma_addr_t dma;
unsigned int idx;
unsigned int entries;
struct sk_buff_head queue;
+ /*add for new trx flow*/
+ struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
};
struct rtl8192_rx_ring {
@@ -199,6 +209,10 @@ struct rtl_pci {
u16 shortretry_limit;
u16 longretry_limit;
+
+ /* MSI support */
+ bool msi_support;
+ bool using_msi;
};
struct mp_adapter {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d1c0191a195b..50504942ded1 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -32,6 +32,106 @@
#include "base.h"
#include "ps.h"
+/* Description:
+ *	This routine parses the power-configuration command table
+ *	for the RTL8723/RTL8188E series ICs.
+ * Assumption:
+ *	The table must follow the specific format released by the HW SD team.
+ */
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 faversion, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[])
+{
+ struct wlan_pwr_cfg cfg_cmd = {0};
+ bool polling_bit = false;
+ u32 ary_idx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
+ "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+
+ if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
+ (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
+ (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
+ switch (GET_PWR_CFG_CMD(cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
+ break;
+ case PWR_CMD_WRITE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+ offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
+ value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
+ GET_PWR_CFG_MASK(cfg_cmd));
+
+				/* Write the value back to the system register */
+ rtl_write_byte(rtlpriv, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
+ polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value &= GET_PWR_CFG_MASK(cfg_cmd);
+ if (value ==
+ (GET_PWR_CFG_VALUE(cfg_cmd)
+ & GET_PWR_CFG_MASK(cfg_cmd)))
+ polling_bit = true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt)
+ return false;
+ } while (!polling_bit);
+ break;
+ case PWR_CMD_DELAY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ if (GET_PWR_CFG_VALUE(cfg_cmd) ==
+ PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+ break;
+ case PWR_CMD_END:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ return true;
+ default:
+ RT_ASSERT(false,
+ "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+ break;
+ }
+
+ }
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
+EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing);
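+
+/* A hedged usage sketch (the table name below is hypothetical): a chip
+ * HAL passes its power-on command table plus its cut/fab/interface
+ * masks, and parsing runs until a PWR_CMD_END entry is reached:
+ *
+ *	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ *				      PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ *				      rtl8188ee_card_enable_flow))
+ *		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ *			 "power-on sequence failed\n");
+ */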
+
bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -659,7 +759,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = (void *)data;
+ struct ieee80211_mgmt *mgmt = data;
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
u8 *pos, *end, *ie;
u16 noa_len;
@@ -758,7 +858,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_mgmt *mgmt = (void *)data;
+ struct ieee80211_mgmt *mgmt = data;
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
u8 noa_num, index, i, noa_index = 0;
u8 *pos, *end, *ie;
@@ -850,9 +950,8 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ &p2p_ps_state);
p2pinfo->noa_index = 0;
p2pinfo->ctwindow = 0;
@@ -864,7 +963,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtlps->smart_ps = 2;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&rtlps->pwr_mode));
+ &rtlps->pwr_mode);
}
}
break;
@@ -877,12 +976,12 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtlps->smart_ps = 0;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&rtlps->pwr_mode));
+ &rtlps->pwr_mode);
}
}
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ &p2p_ps_state);
}
break;
case P2P_PS_SCAN:
@@ -892,7 +991,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
p2pinfo->p2p_ps_state = p2p_ps_state;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- (u8 *)(&p2p_ps_state));
+ &p2p_ps_state);
}
break;
default:
@@ -912,7 +1011,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *)data;
+ struct ieee80211_hdr *hdr = data;
if (!mac->p2p)
return;
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 88bd76ea88f7..3bd41f958974 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -32,6 +32,66 @@
#define MAX_SW_LPS_SLEEP_INTV 5
+/*---------------------------------------------
+ * 3 The value of cmd: 4 bits
+ *---------------------------------------------
+ */
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) (__PWR_CMD.offset)
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk)
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk)
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) (__PWR_CMD.interface_msk)
+#define GET_PWR_CFG_BASE(__PWR_CMD) (__PWR_CMD.base)
+#define GET_PWR_CFG_CMD(__PWR_CMD) (__PWR_CMD.cmd)
+#define GET_PWR_CFG_MASK(__PWR_CMD) (__PWR_CMD.msk)
+#define GET_PWR_CFG_VALUE(__PWR_CMD) (__PWR_CMD.value)
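+
+/* An illustrative (made-up) command table: the first entry writes value
+ * 0x01 into bits [1:0] of MAC register 0x0005 for every cut, fab and
+ * interface; the second entry terminates parsing:
+ *
+ *	static struct wlan_pwr_cfg example_flow[] = {
+ *		{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
+ *		 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0x03, 0x01},
+ *		{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
+ *		 0, PWR_CMD_END, 0, 0},
+ *	};
+ */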
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
enum rf_pwrstate state_toset, u32 changesource);
bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index a98acefb8c06..ee28a1a3d010 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
kfree(rate_priv);
}
-static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
+static const struct rate_control_ops rtl_rate_ops = {
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
index 5b194e97f4b3..a85419a37651 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
@@ -5,7 +5,6 @@ rtl8188ee-objs := \
led.o \
phy.o \
pwrseq.o \
- pwrseqcmd.o \
rf.o \
sw.o \
table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index a6184b6e1d57..f8daa61cf1c3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -235,7 +235,7 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
u8 pwr_val = 0;
u8 cck_base = rtldm->swing_idx_cck_base;
u8 cck_val = rtldm->swing_idx_cck;
- u8 ofdm_base = rtldm->swing_idx_ofdm_base;
+ u8 ofdm_base = rtldm->swing_idx_ofdm_base[0];
u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
if (type == 0) {
@@ -726,7 +726,7 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
static u64 last_rx;
long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
- if (rtlhal->oem_id == RT_CID_819x_HP) {
+ if (rtlhal->oem_id == RT_CID_819X_HP) {
u64 cur_txok_cnt = 0;
u64 cur_rxok_cnt = 0;
cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok;
@@ -851,9 +851,8 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
@@ -912,7 +911,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_old[0] = (u8) i;
- rtldm->swing_idx_ofdm_base = (u8)i;
+ rtldm->swing_idx_ofdm_base[0] = (u8)i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XATXIQIMBAL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index 557bc5b8327e..4f9376ad4739 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -119,7 +119,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
enum version_8188e version, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *buf_ptr = (u8 *)buffer;
+ u8 *buf_ptr = buffer;
u32 page_no, remain;
u32 page, offset;
@@ -213,7 +213,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
return 1;
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *)rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
"normal Firmware SIZE %d\n", fwsize);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index e06971be7df7..94cd9df98381 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -41,7 +41,6 @@
#include "fw.h"
#include "led.h"
#include "hw.h"
-#include "pwrseqcmd.h"
#include "pwrseq.h"
#define LLT_CONFIG 5
@@ -148,8 +147,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
}
if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
if (FW_PS_IS_ACK(rpwm_val)) {
isr_regaddr = REG_HISR;
content = rtl_read_dword(rtlpriv, isr_regaddr);
@@ -226,7 +224,7 @@ static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
rtl_write_word(rtlpriv, REG_HISR, 0x0100);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ &rpwm_val);
spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
rtlhal->fw_clk_change_in_progress = false;
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
@@ -274,15 +272,14 @@ static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
_rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
rtlhal->allow_sw_to_change_hwclc = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
} else {
rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
}
@@ -301,7 +298,7 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlhal->allow_sw_to_change_hwclc = true;
_rtl88ee_set_fw_clock_off(hw, rpwm_val);
} else {
@@ -309,9 +306,8 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ &ppsc->fwctrl_psmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
}
}
@@ -420,12 +416,12 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
}
break; }
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *)val);
+ u8 short_preamble = (bool)*val;
reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
if (short_preamble) {
reg_tmp |= 0x02;
@@ -436,13 +432,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_AMPDU_MIN_SPACE:{
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *)val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -465,7 +461,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *)val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -483,7 +479,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
reg = regtoset_normal;
- factor = *((u8 *)val);
+ factor = *val;
if (factor <= 3) {
factor = (1 << (factor + 2));
if (factor > 0xf)
@@ -506,15 +502,15 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *)val);
+ u8 e_aci = *val;
rtl88e_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
+ &e_aci);
break; }
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *)val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -567,7 +563,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *)(val))[0];
break;
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *)(val))[0];
+ u8 retry_limit = *val;
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -580,7 +576,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *)val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *)val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
@@ -592,15 +588,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *)val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *)val) | BIT(7)));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
}
break; }
case HW_VAR_H2C_FW_PWRMODE:
- rtl88e_set_fw_pwrmode_cmd(hw, (*(u8 *)val));
+ rtl88e_set_fw_pwrmode_cmd(hw, *val);
break;
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *)val);
@@ -617,7 +611,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
_rtl88ee_fwlps_leave(hw);
break; }
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *)val);
+ u8 mstatus = *val;
u8 tmp, tmp_reg422, uval;
u8 count = 0, dlbcn_count = 0;
bool recover = false;
@@ -668,10 +662,10 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0))));
}
- rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val));
+ rtl88e_set_fw_joinbss_report_cmd(hw, *val);
break; }
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl88e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl88e_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -681,7 +675,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->assoc_id));
break; }
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *)(val))[0];
+ u8 btype_ibss = *val;
if (btype_ibss == true)
_rtl88ee_stop_tx_beacon(hw);
@@ -815,11 +809,11 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
/* HW Power on sequence */
- if (!rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
- PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
- Rtl8188E_NIC_ENABLE_FLOW)) {
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ Rtl8188E_NIC_ENABLE_FLOW)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl88_hal_pwrseqcmdparsing\n");
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -1025,9 +1019,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
bool rtstatus = true;
int err = 0;
u8 tmp_u1b, u1byte;
+ unsigned long flags;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
rtlpriv->rtlhal.being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
@@ -1043,7 +1048,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl88e_download_fw(hw, false);
@@ -1051,8 +1056,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
- return err;
+ goto exit;
} else {
rtlhal->fw_ready = true;
}
@@ -1097,7 +1101,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
if (ppsc->rfpwr_state == ERFON) {
if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
- (rtlhal->oem_id == RT_CID_819x_HP))) {
+ (rtlhal->oem_id == RT_CID_819X_HP))) {
rtl88e_phy_set_rfpath_switch(hw, true);
rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
} else {
@@ -1135,10 +1139,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
}
rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
rtl88e_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpriv->rtlhal.being_init_adapter = false;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
err);
- return 0;
+ return err;
}
static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
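The two hunks above bracket the slow rtl88ee_hw_init() path with local_save_flags()/local_irq_enable() on entry and local_irq_restore() on the shared exit label. A minimal sketch of that pattern follows; the example_* helper names are invented stand-ins for the real MAC/firmware bring-up, and only the irq primitives from <linux/irqflags.h> are the ones the hunk actually uses. The same bracketing is applied to rtl92cu_hw_init() further down.

#include <linux/irqflags.h>

/* Illustrative only: example_* names are made up; the irq calls are
 * the real primitives used by the hunks above.
 */
static int example_slow_setup(void)
{
	return 0;	/* stands in for the ~350 ms MAC/BB/RF bring-up */
}

static int example_hw_init(void)
{
	unsigned long flags;
	int err;

	local_save_flags(flags);	/* remember the caller's irq state */
	local_irq_enable();		/* let other devices keep being serviced */

	err = example_slow_setup();	/* safe: our own irqs are enabled later */
	if (err)
		goto exit;

	/* ... firmware download, PHY/RF configuration ... */
exit:
	local_irq_restore(flags);	/* hand back exactly what we were given */
	return err;
}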
@@ -1235,12 +1241,13 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid == true) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1345,9 +1352,9 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
}
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
- rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK,
- Rtl8188E_NIC_LPS_ENTER_FLOW);
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK,
+ Rtl8188E_NIC_LPS_ENTER_FLOW);
rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
@@ -1361,8 +1368,8 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
- rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
- PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
@@ -1816,7 +1823,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
/*customer ID*/
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
@@ -1833,7 +1840,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
/*tx power*/
@@ -1845,7 +1852,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag,
hwinfo);
/*board type*/
- rtlefuse->board_type = (((*(u8 *)&hwinfo[jj]) & 0xE0) >> 5);
+ rtlefuse->board_type = (hwinfo[jj] & 0xE0) >> 5;
/*Wake on wlan*/
rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6);
/*parse xtal*/
@@ -1872,15 +1879,15 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
case EEPROM_CID_DEFAULT:
if (rtlefuse->eeprom_did == 0x8179) {
if (rtlefuse->eeprom_svid == 0x1025) {
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
} else if ((rtlefuse->eeprom_svid == 0x10EC &&
rtlefuse->eeprom_smid == 0x0179) ||
(rtlefuse->eeprom_svid == 0x17AA &&
rtlefuse->eeprom_smid == 0x0179)) {
- rtlhal->oem_id = RT_CID_819x_Lenovo;
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
} else if (rtlefuse->eeprom_svid == 0x103c &&
rtlefuse->eeprom_smid == 0x197d) {
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
} else {
rtlhal->oem_id = RT_CID_DEFAULT;
}
@@ -1892,7 +1899,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -1911,14 +1918,14 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
pcipriv->ledctl.led_opendrain = true;
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
@@ -2211,8 +2218,7 @@ void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 sifs_timer;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
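The set_check_bssid() hunk above, and its 8192ce/8192cu/8192de counterparts below, stop starting from a cached receive_config or a raw register read and instead ask the HAL for the live RCR value before toggling the BSSID-filter bits. A minimal sketch of that read-modify-write, using only what the diff shows and assuming the rtlwifi wifi.h definitions; the real per-chip functions do additional handling that is omitted here.

#include "wifi.h"	/* rtl_priv(), HW_VAR_RCR, RCR_CBSSID_* (rtlwifi core header) */

/* Fetch the live RCR through the HAL ops, flip the BSSID filter bits,
 * and write it back through the same abstraction.
 */
static void example_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 reg_rcr;

	if (rtlpriv->psc.rfpwr_state != ERFON)
		return;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);

	if (check_bssid)
		reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	else
		reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);

	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
}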
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
index d67f9c731cc4..1cd6c16d597e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
@@ -29,6 +29,7 @@
#include "../wifi.h"
#include "../pci.h"
+#include "../core.h"
#include "../ps.h"
#include "reg.h"
#include "def.h"
@@ -151,18 +152,7 @@ static bool config_bb_with_pgheader(struct ieee80211_hw *hw,
v2 = table_pg[i + 1];
if (v1 < 0xcdcdcdcd) {
- if (table_pg[i] == 0xfe)
- mdelay(50);
- else if (table_pg[i] == 0xfd)
- mdelay(5);
- else if (table_pg[i] == 0xfc)
- mdelay(1);
- else if (table_pg[i] == 0xfb)
- udelay(50);
- else if (table_pg[i] == 0xfa)
- udelay(5);
- else if (table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(table_pg[i]);
store_pwrindex_offset(hw, table_pg[i],
table_pg[i + 1],
@@ -672,24 +662,9 @@ static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw,
u32 addr, u32 data, enum radio_path rfpath,
u32 regaddr)
{
- if (addr == 0xffe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, regaddr,
- RFREG_OFFSET_MASK,
- data);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, regaddr,
+ RFREG_OFFSET_MASK,
+ data);
}
static void rtl88_config_s(struct ieee80211_hw *hw,
@@ -702,28 +677,6 @@ static void rtl88_config_s(struct ieee80211_hw *hw,
addr | maskforphyset);
}
-static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw,
- u32 addr, u32 data)
-{
- if (addr == 0xfe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-
-
#define NEXT_PAIR(v1, v2, i) \
do { \
i += 2; v1 = array_table[i]; \
@@ -795,7 +748,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
v1 = array_table[i];
v2 = array_table[i + 1];
if (v1 < 0xcdcdcdcd) {
- _rtl8188e_config_bb_reg(hw, v1, v2);
+ rtl_bb_delay(hw, v1, v2);
} else {/*This line is the start line of branch.*/
if (!check_cond(hw, array_table[i])) {
/*Discard the following (offset, data) pairs*/
@@ -811,7 +764,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
while (v2 != 0xDEAD &&
v2 != 0xCDEF &&
v2 != 0xCDCD && i < arraylen - 2) {
- _rtl8188e_config_bb_reg(hw, v1, v2);
+ rtl_bb_delay(hw, v1, v2);
NEXT_PAIR(v1, v2, i);
}
@@ -1002,7 +955,7 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
}
- if (rtlhal->oem_id == RT_CID_819x_HP)
+ if (rtlhal->oem_id == RT_CID_819X_HP)
rtl88_config_s(hw, 0x52, 0x7E4BD);
break;
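Throughout the phy.c files in this series, the open-coded 0xfe/0xfd/.../0xf9 delay chains are folded into shared helpers (rtl_addr_delay(), rtl_bb_delay(), rtl_rfreg_delay()) from rtlwifi's core. Below is a sketch reconstructed from the removed branches, assuming the rtlwifi wifi.h definitions; the in-tree helpers may differ in detail (the 8188EE RF path, for instance, also special-cased 0xffe).

#include <linux/delay.h>
#include "wifi.h"	/* rtl_set_rfreg(), RFREG_OFFSET_MASK, enum radio_path */

/* Reconstructed from the removed if/else chains: some "addresses" in
 * the config tables are really delay opcodes, not registers.
 */
static void example_addr_delay(u32 addr)
{
	switch (addr) {
	case 0xfe: mdelay(50); break;
	case 0xfd: mdelay(5); break;
	case 0xfc: mdelay(1); break;
	case 0xfb: udelay(50); break;
	case 0xfa: udelay(5); break;
	case 0xf9: udelay(1); break;
	default: break;
	}
}

static void example_rfreg_delay(struct ieee80211_hw *hw, enum radio_path path,
				u32 addr, u32 mask, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		example_addr_delay(addr);	/* delay opcode, not a register */
	} else {
		rtl_set_rfreg(hw, path, addr, mask, data);
		udelay(1);	/* keep the 1us gap between BB/RF writes */
	}
}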
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 028ec6dd52b4..32e135ab9a63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -30,7 +30,6 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
/*
Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
index d849abf7d94a..7af85cfa8f87 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
@@ -2215,22 +2215,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 347af1e4f438..1b4101bf9974 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,6 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
+ rtlpci->msi_support = true;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
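The one-line sw.c change marks the 8188EE as MSI-capable (rtlpci->msi_support = true); the interrupt-request side lives in the shared pci.c, which this series reworks separately. As a rough idea of the usual enable-MSI-or-fall-back-to-INTx pattern only: the helper name, the "example-wifi" string, and the overall structure here are assumptions, not the driver's code.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Generic MSI-with-INTx-fallback sketch; rtlwifi's pci.c variant is
 * structured differently but follows the same idea.
 */
static int example_request_irq(struct pci_dev *pdev, irq_handler_t handler,
			       void *ctx, bool msi_support)
{
	int ret;

	if (msi_support && !pci_enable_msi(pdev)) {
		/* MSI granted: pdev->irq now refers to the MSI vector */
		ret = request_irq(pdev->irq, handler, 0, "example-wifi", ctx);
		if (!ret)
			return 0;
		pci_disable_msi(pdev);	/* MSI handler refused; fall back */
	}

	/* Legacy INTx: the line may be shared with other devices */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "example-wifi", ctx);
}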
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index aece6c9cccf1..06ef47cd6203 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/* During testing, hdr was NULL */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -489,16 +489,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 *pdesc = (u8 *)pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -717,7 +716,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -734,7 +733,8 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
pdesc, TX_DESC_SIZE);
}
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx == true) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
index 21ca33a7c770..8c2609412d2c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
@@ -777,15 +777,15 @@ struct rx_desc_88e {
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta,
- struct sk_buff *skb,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 2eb0b38384dd..55adf043aef7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -319,7 +319,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
(&e_aci));
@@ -476,7 +476,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl92c_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -521,30 +521,32 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ HW_VAR_SET_RPWM,
+ &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
(u8 *)(&fw_current_inps));
}
break; }
+ case HW_VAR_KEEP_ALIVE:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %d not processed\n", variable);
break;
}
}
@@ -1214,11 +1216,13 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1734,7 +1738,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_did == 0x8176) {
if ((rtlefuse->eeprom_svid == 0x103C &&
rtlefuse->eeprom_smid == 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -1745,7 +1749,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -1764,14 +1768,14 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
pcipriv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 73262ca3864b..98b22303c84d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "hw.h"
@@ -198,18 +199,7 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
}
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
@@ -245,18 +235,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92c_store_pwrIndex_diffrate_offset(hw,
phy_regarray_table_pg[i],
@@ -305,46 +284,16 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd)
- mdelay(5);
- else if (radiob_array_table[i] == 0xfc)
- mdelay(1);
- else if (radiob_array_table[i] == 0xfb)
- udelay(50);
- else if (radiob_array_table[i] == 0xfa)
- udelay(5);
- else if (radiob_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- RFREG_OFFSET_MASK,
- radiob_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -355,6 +304,8 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
+ default:
+ break;
}
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 8922ecb47ad2..ed703a1b3b7c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -2044,22 +2044,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52abf0a862fa..8f04817cb7ec 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/* In testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -426,7 +426,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
@@ -666,7 +666,8 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
}
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index a7cdd514cb2e..9a39ec4204dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -711,8 +711,8 @@ struct rx_desc_92c {
} __packed;
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -720,7 +720,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 468bf73cc883..68b5c7e92cfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -394,7 +394,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_did == 0x8176) {
if ((rtlefuse->eeprom_svid == 0x103C &&
rtlefuse->eeprom_smid == 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -405,7 +405,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlhal->oem_id = RT_CID_TOSHIBA;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
default:
@@ -423,14 +423,14 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
switch (rtlhal->oem_id) {
- case RT_CID_819x_HP:
+ case RT_CID_819X_HP:
usb_priv->ledctl.led_opendrain = true;
break;
- case RT_CID_819x_Lenovo:
+ case RT_CID_819X_LENOVO:
case RT_CID_DEFAULT:
case RT_CID_TOSHIBA:
case RT_CID_CCX:
- case RT_CID_819x_Acer:
+ case RT_CID_819X_ACER:
case RT_CID_WHQL:
default:
break;
@@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
int err = 0;
static bool iqk_initialized;
+ unsigned long flags;
+
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
@@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0; /* h2c */
_rtl92cu_phy_param_tab_init(hw);
@@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
_InitPABias(hw);
_update_mac_setting(hw);
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
return err;
}
@@ -1379,11 +1392,13 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
u8 tmp;
if (IS_NORMAL_CHIP(rtlhal->version)) {
@@ -1795,7 +1810,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
e_aci);
break;
}
- if (rtlusb->acm_method != eAcmWay2_SW)
+ if (rtlusb->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL, &e_aci);
break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 0c09240eadcc..9831ff1128ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -188,18 +189,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
}
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
@@ -236,18 +226,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92c_store_pwrIndex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -294,46 +273,16 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd)
- mdelay(5);
- else if (radiob_array_table[i] == 0xfc)
- mdelay(1);
- else if (radiob_array_table[i] == 0xfb)
- udelay(50);
- else if (radiob_array_table[i] == 0xfa)
- udelay(5);
- else if (radiob_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- RFREG_OFFSET_MASK,
- radiob_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -344,6 +293,8 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
break;
+ default:
+ break;
}
return true;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 1bc21ccfa71b..035e0dc3922c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -495,7 +495,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 725c53accc58..fd8051dcd98a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,7 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
struct sk_buff_head *);
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 queue_index,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 7908e1c85819..304c443b89b2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -194,15 +194,15 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
falsealm_cnt->cnt_rate_illegal +
@@ -214,9 +214,9 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
/* hold cck counter */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
falsealm_cnt->cnt_cck_fail = ret_value;
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
@@ -331,11 +331,11 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
} else {
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
@@ -722,7 +722,7 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
- rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
+ rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
}
@@ -737,7 +737,7 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
/* Query CCK default setting From 0xa24 */
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
- BMASKDWORD) & BMASKCCK;
+ MASKDWORD) & MASKCCK;
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
for (i = 0; i < CCK_TABLE_LENGTH; i++) {
if (rtlpriv->dm.cck_inch14) {
@@ -896,9 +896,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
rf = 1;
if (thermalvalue) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD) & BMASKOFDM_D;
+ MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
@@ -910,10 +910,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
if (is2t) {
ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD) & BMASKOFDM_D;
+ MASKDWORD) & MASKOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
if (ele_d ==
- (ofdmswing_table[i] & BMASKOFDM_D)) {
+ (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
DBG_LOUD,
@@ -1091,10 +1091,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
16) | ele_a;
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD, value32);
+ MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
@@ -1103,10 +1103,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
} else {
rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD,
+ MASKDWORD,
ofdmswing_table
[(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(24), 0x00);
@@ -1204,21 +1204,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
ele_a;
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD, value32);
+ MASKDWORD, value32);
value32 = (ele_c & 0x000003C0) >> 6;
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
- BMASKH4BITS, value32);
+ MASKH4BITS, value32);
value32 = ((val_x * ele_d) >> 7) & 0x01;
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), value32);
} else {
rtl_set_bbreg(hw,
ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD,
+ MASKDWORD,
ofdmswing_table
[(u8) ofdm_index[1]]);
rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
- BMASKH4BITS, 0x00);
+ MASKH4BITS, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
@@ -1229,10 +1229,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
}
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
- rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
if ((delta_iqk > rtlefuse->delta_iqk) &&
(rtlefuse->delta_iqk != 0)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index c4a7db9135d6..2b08671004a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -318,7 +318,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_AC_PARAM: {
u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
&e_aci);
break;
@@ -985,9 +985,9 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
/* set default value after initialize RF, */
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
/*---- Set CCK and OFDM Block "ON"----*/
if (rtlhal->current_bandtype == BAND_ON_2_4G)
@@ -1035,7 +1035,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
tmp_rega = rtl_get_rfreg(hw,
(enum radio_path)RF90_PATH_A,
- 0x2a, BMASKDWORD);
+ 0x2a, MASKDWORD);
if (((tmp_rega & BIT(11)) == BIT(11)))
break;
@@ -1138,11 +1138,13 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
@@ -1332,13 +1334,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
/* c. ========RF OFF sequence========== */
/* 0x88c[23:20] = 0xf. */
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
/* APSD_CTRL 0x600[7:0] = 0x40 */
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
/* Close antenna 0,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0);
/* SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB state machine */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 13196cc4b1d3..3d1f0dd4e52d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -242,7 +243,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
else if (rtlhal->during_mac0init_radiob)
/* mac0 use phy1 write radio_b. */
dbi_direct = BIT(3) | BIT(2);
- if (bitmask != BMASKDWORD) {
+ if (bitmask != MASKDWORD) {
if (rtlhal->during_mac1init_radioa ||
rtlhal->during_mac0init_radiob)
originalvalue = rtl92de_read_dword_dbi(hw,
@@ -275,20 +276,20 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
u32 retvalue;
newoffset = offset;
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD);
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
if (rfpath == RF90_PATH_A)
tmplong2 = tmplong;
else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD);
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
(newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong & (~BLSSIREADEDGE));
udelay(10);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD, tmplong2);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
udelay(50);
udelay(50);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
tmplong | BLSSIREADEDGE);
udelay(10);
if (rfpath == RF90_PATH_A)
@@ -321,7 +322,7 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
/* T65 RF */
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr);
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
@@ -362,7 +363,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
return;
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
- if (bitmask != BRFREGOFFSETMASK) {
+ if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92d_phy_rf_serial_read(hw,
rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
@@ -567,19 +568,8 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
" ===> phy:Rtl819XPHY_REG_Array_PG\n");
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
- rtl_set_bbreg(hw, phy_regarray_table[i], BMASKDWORD,
+ rtl_addr_delay(phy_regarray_table[i]);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
@@ -591,7 +581,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->interfaceindex == 0) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_array_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -607,7 +597,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_array_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -623,7 +613,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
for (i = 0; i < agctab_5garraylen; i = i + 2) {
rtl_set_bbreg(hw,
agctab_5garray_table[i],
- BMASKDWORD,
+ MASKDWORD,
agctab_5garray_table[i + 1]);
/* Add 1us delay between BB/RF register
* setting. */
@@ -705,18 +695,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg = rtl8192de_phy_reg_array_pg;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_rtl92d_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -843,54 +822,16 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe) {
- mdelay(50);
- } else if (radioa_array_table[i] == 0xfd) {
- /* delay_ms(5); */
- mdelay(5);
- } else if (radioa_array_table[i] == 0xfc) {
- /* delay_ms(1); */
- mdelay(1);
- } else if (radioa_array_table[i] == 0xfb) {
- udelay(50);
- } else if (radioa_array_table[i] == 0xfa) {
- udelay(5);
- } else if (radioa_array_table[i] == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- BRFREGOFFSETMASK,
- radioa_array_table[i + 1]);
- /* Add 1us delay between BB/RF register set. */
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
for (i = 0; i < radiob_arraylen; i = i + 2) {
- if (radiob_array_table[i] == 0xfe) {
- /* Delay specific ms. Only RF configuration
- * requires delay. */
- mdelay(50);
- } else if (radiob_array_table[i] == 0xfd) {
- /* delay_ms(5); */
- mdelay(5);
- } else if (radiob_array_table[i] == 0xfc) {
- /* delay_ms(1); */
- mdelay(1);
- } else if (radiob_array_table[i] == 0xfb) {
- udelay(50);
- } else if (radiob_array_table[i] == 0xfa) {
- udelay(5);
- } else if (radiob_array_table[i] == 0xf9) {
- udelay(1);
- } else {
- rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
- BRFREGOFFSETMASK,
- radiob_array_table[i + 1]);
- /* Add 1us delay between BB/RF register set. */
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
}
break;
case RF90_PATH_C:
@@ -911,13 +852,13 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
rtlphy->default_initialgain[0] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[1] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[2] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0);
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
rtlphy->default_initialgain[0],
@@ -925,9 +866,9 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[2],
rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
- BMASKBYTE0);
+ MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
- BMASKDWORD);
+ MASKDWORD);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
"Default framesync (0x%x) = 0x%x\n",
ROFDM0_RXDETECTOR3, rtlphy->framesync);
@@ -1106,7 +1047,7 @@ static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
}
@@ -1168,7 +1109,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 imr_num = MAX_RF_IMR_INDEX;
- u32 rfmask = BRFREGOFFSETMASK;
+ u32 rfmask = RFREG_OFFSET_MASK;
u8 group, i;
unsigned long flag = 0;
@@ -1211,7 +1152,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
for (i = 0; i < imr_num; i++) {
rtl_set_rfreg(hw, (enum radio_path)rfpath,
rf_reg_for_5g_swchnl_normal[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_imr_param_normal[0][0][i]);
}
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1329,7 +1270,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK, 0xE439D);
+ RFREG_OFFSET_MASK, 0xE439D);
} else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
u4tmp2 = (rf_reg_pram_c_5g[index][i] &
0x7FF) | (u4tmp << 11);
@@ -1337,11 +1278,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
u4tmp2 &= ~(BIT(7) | BIT(6));
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK, u4tmp2);
+ RFREG_OFFSET_MASK, u4tmp2);
} else {
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_reg_pram_c_5g[index][i]);
}
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1351,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
path, index,
rtl_get_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_5g[i],
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1381,7 +1322,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
i++) {
rtl_set_rfreg(hw, rfpath,
rf_for_c_cut_5g_internal_pa[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_pram_c_5g_int_pa[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
"offset 0x%x value 0x%x path %d index %d\n",
@@ -1422,13 +1363,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
(rf_reg_param_for_c_cut_2g[index][i] |
BIT(17)));
else
rtl_set_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_reg_param_for_c_cut_2g
[index][i]);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1438,14 +1379,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_reg_mask_for_c_cut_2g[i], path, index,
rtl_get_rfreg(hw, (enum radio_path)path,
rf_reg_for_c_cut_2g[i],
- BRFREGOFFSETMASK));
+ RFREG_OFFSET_MASK));
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1493,41 +1434,41 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
if (rtlhal->interfaceindex == 0) {
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
} else {
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c22);
}
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x28160206);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160206);
/* path-B IQK setting */
if (configpathb) {
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x10008c22);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140102);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160206);
}
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* One shot, path A LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path A LOK & IQK\n",
IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
- rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
- regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
(((rege9c & 0x03FF0000) >> 16) != 0x42))
@@ -1563,42 +1504,42 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n");
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68160960);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68160960);
/* path-B IQK setting */
if (configpathb) {
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82110000);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68110000);
}
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* path-A PA on */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60);
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 0x66e60e30);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
for (i = 0; i < retrycount; i++) {
/* One shot, path A LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path A LOK & IQK.\n",
IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
- rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
- regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
if (!(regeac & TxOKBit) &&
(((rege94 & 0x03FF0000) >> 16) != 0x142)) {
@@ -1620,9 +1561,9 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
}
}
/* path A PA off */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
rtlphy->iqk_bb_backup[0]);
- rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
rtlphy->iqk_bb_backup[1]);
return result;
}
@@ -1637,22 +1578,22 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* One shot, path B LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002);
- rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
mdelay(IQK_DELAY_TIME);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
- regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
- regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
- regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
(((regebc & 0x03FF0000) >> 16) != 0x42))
@@ -1680,31 +1621,31 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQK!\n");
/* path-A IQK setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
- rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000);
- rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68110000);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68110000);
/* path-B IQK setting */
- rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
- rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140307);
- rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960);
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68160960);
/* LO calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
- rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
/* path-B PA on */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x0f600700);
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 0x061f0d30);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
for (i = 0; i < retrycount; i++) {
/* One shot, path B LOK & IQK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"One shot, path A LOK & IQK!\n");
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000);
- rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
/* delay x ms */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -1712,15 +1653,15 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
mdelay(IQK_DELAY_TIME * 10);
/* Check failed */
- regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
- regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
- regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
- regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
- regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
if (!(regeac & BIT(31)) &&
(((regeb4 & 0x03FF0000) >> 16) != 0x142))
@@ -1738,9 +1679,9 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
}
/* path B PA off */
- rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
rtlphy->iqk_bb_backup[0]);
- rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
rtlphy->iqk_bb_backup[2]);
return result;
}
@@ -1754,7 +1695,7 @@ static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
for (i = 0; i < regnum; i++)
- adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD);
+ adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
}
static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
@@ -1779,7 +1720,7 @@ static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Reload ADDA power saving parameters !\n");
for (i = 0; i < regnum; i++)
- rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]);
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
}
static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
@@ -1807,7 +1748,7 @@ static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
0x04db25a4 : 0x0b1b25a4;
for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, pathon);
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
}
static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -1830,9 +1771,9 @@ static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0);
- rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
@@ -1843,8 +1784,8 @@ static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
mode = pi_mode ? 0x01000100 : 0x01000000;
- rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode);
- rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
}
static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
@@ -1875,7 +1816,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
@@ -1898,40 +1839,40 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
_rtl92d_phy_pimode_switch(hw, true);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
- rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22204000);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
if (is2t) {
- rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
0x00010000);
- rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, BMASKDWORD,
+ rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
0x00010000);
}
/* MAC settings */
_rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
/* Page B init */
- rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
- rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
/* IQ calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
- rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00);
- rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
if (patha_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
} else if (i == (retrycount - 1) && patha_ok == 0x01) {
@@ -1939,9 +1880,9 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Only Tx Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
}
}
@@ -1957,22 +1898,22 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][6] = (rtl_get_bbreg(hw, 0xec4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][7] = (rtl_get_bbreg(hw, 0xecc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
break;
} else if (i == (retrycount - 1) && pathb_ok == 0x01) {
/* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B Only Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
result[t][5] = (rtl_get_bbreg(hw, 0xebc,
- BMASKDWORD) & 0x3FF0000) >> 16;
+ MASKDWORD) & 0x3FF0000) >> 16;
}
}
if (0x00 == pathb_ok)
@@ -1984,7 +1925,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"IQK:Back to BB mode, load original value!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
if (t != 0) {
/* Switch back BB to SI mode after finish IQ Calibration. */
if (!rtlphy->rfpi_enable)
@@ -2004,8 +1945,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
rtlphy->iqk_bb_backup,
IQK_BB_REG_NUM - 1);
/* load 0xe30 IQC default value */
- rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00);
- rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
}
@@ -2042,7 +1983,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
mdelay(IQK_DELAY_TIME * 20);
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
@@ -2072,38 +2013,38 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (!rtlphy->rfpi_enable)
_rtl92d_phy_pimode_switch(hw, true);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
- rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
- rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22208000);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
/* Page B init */
- rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
- rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
/* IQ calibration setting */
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
- rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00);
- rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x10007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
if (patha_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
} else if (patha_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path A IQK Only Tx Success!!\n");
- result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
0x3FF0000) >> 16;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
@@ -2116,20 +2057,20 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B IQK Success!!\n");
- result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][6] = (rtl_get_bbreg(hw, 0xec4, BMASKDWORD) &
+ result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][7] = (rtl_get_bbreg(hw, 0xecc, BMASKDWORD) &
+ result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
0x3FF0000) >> 16;
} else if (pathb_ok == 0x01) { /* Tx IQK OK */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"Path B Only Tx IQK Success!!\n");
- result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
0x3FF0000) >> 16;
- result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
0x3FF0000) >> 16;
} else {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2140,7 +2081,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
/* Back to BB mode, load original value */
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"IQK:Back to BB mode, load original value!\n");
- rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
if (t != 0) {
if (is2t)
_rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
@@ -2240,7 +2181,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
return;
} else if (iqk_ok) {
oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
+ MASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
val_x = result[final_candidate][0];
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
@@ -2271,7 +2212,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
((val_y * oldval_0 >> 7) & 0x1));
RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
- BMASKDWORD));
+ MASKDWORD));
if (txonly) {
RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
return;
@@ -2299,7 +2240,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
return;
} else if (iqk_ok) {
oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
- BMASKDWORD) >> 22) & 0x3FF;
+ MASKDWORD) >> 22) & 0x3FF;
val_x = result[final_candidate][4];
if ((val_x & 0x00000200) != 0)
val_x = val_x | 0xFFFFFC00;
@@ -2657,7 +2598,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
rf_mode[index] = rtl_read_byte(rtlpriv, offset);
/* 2. Set RF mode = standby mode */
rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
- BRFREGOFFSETMASK, 0x010000);
+ RFREG_OFFSET_MASK, 0x010000);
if (rtlpci->init_ready) {
/* switch CV-curve control by LC-calibration */
rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
@@ -2667,16 +2608,16 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
0x08000, 0x01);
}
u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
- BRFREGOFFSETMASK);
+ RFREG_OFFSET_MASK);
while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
mdelay(50);
timecount += 50;
u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
- RF_SYN_G6, BRFREGOFFSETMASK);
+ RF_SYN_G6, RFREG_OFFSET_MASK);
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"PHY_LCK finish delay for %d ms=2\n", timecount);
- u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK);
+ u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
if (index == 0 && rtlhal->interfaceindex == 0) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"path-A / 5G LCK\n");
@@ -2696,9 +2637,9 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
0x7f, i);
rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
- BRFREGOFFSETMASK, 0x0);
+ RFREG_OFFSET_MASK, 0x0);
readval = rtl_get_rfreg(hw, (enum radio_path)index,
- 0x4F, BRFREGOFFSETMASK);
+ 0x4F, RFREG_OFFSET_MASK);
curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
/* reg 0x4f [4:0] */
/* reg 0x50 [19:10] */
@@ -2912,7 +2853,7 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
rtl_set_rfreg(hw, (enum radio_path)rfpath,
currentcmd->para1,
- BRFREGOFFSETMASK,
+ RFREG_OFFSET_MASK,
rtlphy->rfreg_chnlval[rfpath]);
_rtl92d_phy_reload_imr_setting(hw, channel,
rfpath);
@@ -2960,7 +2901,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
rtlhal->bandset == BAND_ON_BOTH) {
ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
- BMASKDWORD);
+ MASKDWORD);
if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G);
else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
@@ -3112,7 +3053,7 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
/* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
/* b. RF path 0 offset 0x00 = 0x00 disable RF */
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
/* c. APSD_CTRL 0x600[7:0] = 0x40 */
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
/* d. APSD_CTRL 0x600[7:0] = 0x00
@@ -3120,12 +3061,12 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
* RF path 0 offset 0x00 = 0x00
* APSD_CTRL 0x600[7:0] = 0x40
* */
- u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
while (u4btmp != 0 && delay > 0) {
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
- rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
- u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
delay--;
}
if (delay == 0) {
@@ -3468,9 +3409,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* 5G LAN ON */
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
/* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x40000100);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3524,16 +3465,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
/* TX BB gain shift,Just for testchip,0xc80,0xc88 */
if (rtlefuse->internal_pa_5g[0])
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlefuse->internal_pa_5g[1])
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x2d4000b5);
else
- rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
0x20000080);
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3560,8 +3501,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
}
}
/* update IQK related settings */
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, BMASKDWORD, 0x40000100);
- rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, BMASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
BIT(26) | BIT(24), 0x00);
@@ -3590,7 +3531,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
/* DMDP */
if (rtlphy->rf_type == RF_1T1R) {
/* Use antenna 0,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x11);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
/* enable ad/da clock1 for dual-phy reg0x888 */
@@ -3612,7 +3553,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
} else {
/* Single PHY */
/* Use antenna 0 & 1,0xc04,0xd04 */
- rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x33);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
/* disable ad/da clock1,0x888 */
rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
@@ -3620,9 +3561,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
- RF_CHNLBW, BRFREGOFFSETMASK);
+ RF_CHNLBW, RFREG_OFFSET_MASK);
rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
- BRFREGOFFSETMASK);
+ RFREG_OFFSET_MASK);
}
for (i = 0; i < 2; i++)
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
index b7498c5bafc5..7f29b8d765b3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -1295,18 +1295,4 @@
#define BWORD1 0xc
#define BDWORD 0xf
-#define BMASKBYTE0 0xff
-#define BMASKBYTE1 0xff00
-#define BMASKBYTE2 0xff0000
-#define BMASKBYTE3 0xff000000
-#define BMASKHWORD 0xffff0000
-#define BMASKLWORD 0x0000ffff
-#define BMASKDWORD 0xffffffff
-#define BMASK12BITS 0xfff
-#define BMASKH4BITS 0xf0000000
-#define BMASKOFDM_D 0xffc00000
-#define BMASKCCK 0x3f3f3f3f
-
-#define BRFREGOFFSETMASK 0xfffff
-
#endif
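A note on the register-mask rename running through the hunks above: BMASKDWORD, BMASKBYTE* and BRFREGOFFSETMASK were per-chip duplicates, and this hunk (plus a matching one in rtl8192se/reg.h further down) deletes the private copies in favour of the driver-wide spellings MASKDWORD, MASKBYTE*, RFREG_OFFSET_MASK and friends. A minimal sketch of the consolidated definitions, assuming they live in a shared rtlwifi header; the values simply mirror the lines removed here:

/* Sketch only: shared BB/RF mask macros assumed to be provided by a common
 * rtlwifi header; values mirror the per-chip defines deleted above. */
#define MASKBYTE0          0xff        /* byte 0 of a 32-bit BB register */
#define MASKBYTE1          0xff00
#define MASKBYTE2          0xff0000
#define MASKBYTE3          0xff000000
#define MASKHWORD          0xffff0000
#define MASKLWORD          0x0000ffff
#define MASKDWORD          0xffffffff  /* whole 32-bit register */
#define MASK12BITS         0xfff
#define MASKH4BITS         0xf0000000
#define MASKOFDM_D         0xffc00000
#define MASKCCK            0x3f3f3f3f
#define RFREG_OFFSET_MASK  0xfffff     /* RF registers are 20 bits wide */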
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 20144e0b4142..6a6ac540d5b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -125,7 +125,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
}
tmpval = tx_agc[RF90_PATH_A] & 0xff;
- rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval);
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_A_CCK1_MCS32);
@@ -135,7 +135,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_B_CCK11_A_CCK2_11);
tmpval = tx_agc[RF90_PATH_B] >> 24;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval);
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
tmpval, RTXAGC_B_CCK11_A_CCK2_11);
@@ -360,7 +360,7 @@ static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset = regoffset_a[index];
else
regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval);
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
"Set 0x%x = %08x\n", regoffset, writeval);
if (((get_rf_type(rtlphy) == RF_2T2R) &&
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 0eb0f4ae5920..99c2ab5dfceb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -545,7 +545,7 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -786,7 +786,8 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
}
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index c1b5dfb79d53..fb5cf0634e8d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -728,8 +728,8 @@ struct rx_desc_92d {
} __packed;
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -737,7 +737,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4f461786a7eb..9098558d916d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -251,7 +251,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 e_aci = *val;
rtl92s_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
+ if (rtlpci->acm_method != EACMWAY2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
&e_aci);
@@ -413,20 +413,18 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
@@ -955,7 +953,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 tmp_byte = 0;
-
+ unsigned long flags;
bool rtstatus = true;
u8 tmp_u1b;
int err = false;
@@ -967,6 +965,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlpci->being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe doing so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
/* 1. MAC Initialize */
@@ -984,7 +992,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now... "
"Please copy FW into /lib/firmware/rtlwifi\n");
- return 1;
+ err = 1;
+ goto exit;
}
/* After FW download, we have to reset MAC register */
@@ -997,7 +1006,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
if (!rtl92s_phy_mac_config(hw)) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* because last function modify RCR, so we update
@@ -1016,7 +1026,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
if (!rtl92s_phy_bb_config(hw)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
@@ -1033,7 +1044,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
if (!rtl92s_phy_rf_config(hw)) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
- return rtstatus;
+ err = rtstatus;
+ goto exit;
}
/* After read predefined TXT, we must set BB/MAC/RF
@@ -1122,8 +1134,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
rtl92s_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
-
return err;
}
@@ -1135,12 +1148,13 @@ void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid) {
reg_rcr |= (RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
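Two behavioural changes sit in the rtl8192se/hw.c hunks above. rtl92se_hw_init() now re-enables interrupts around its long initialization (the in-code comment cites up to 350 ms) and converts the early returns into goto exit so that local_irq_restore() runs on every path; rtl92se_set_check_bssid() re-reads RCR through get_hw_reg() instead of trusting the cached rtlpci->receive_config. A minimal sketch of the irq-window pattern, with do_one_init_step() as a made-up placeholder for the real config steps:

/* Sketch of the pattern used by rtl92se_hw_init() above; assumes only the
 * standard kernel irqflags helpers, nothing driver-specific. */
int example_long_init(struct ieee80211_hw *hw)
{
        unsigned long flags;
        int err = 0;

        local_save_flags(flags);        /* remember the caller's irq state */
        local_irq_enable();             /* let other devices be serviced while
                                         * this slow init runs; safe because our
                                         * own irqs are enabled only later */

        if (!do_one_init_step(hw)) {    /* placeholder for MAC/BB/RF config */
                err = 1;
                goto exit;              /* never return directly: the saved
                                         * flags must be restored on all paths */
        }
exit:
        local_irq_restore(flags);       /* back to the caller's irq state */
        return err;
}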
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 9c092e6eb3fe..77c5b5f35244 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -30,6 +30,7 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
@@ -833,18 +834,7 @@ static bool _rtl92s_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_len; i = i + 2) {
- if (phy_reg_table[i] == 0xfe)
- mdelay(50);
- else if (phy_reg_table[i] == 0xfd)
- mdelay(5);
- else if (phy_reg_table[i] == 0xfc)
- mdelay(1);
- else if (phy_reg_table[i] == 0xfb)
- udelay(50);
- else if (phy_reg_table[i] == 0xfa)
- udelay(5);
- else if (phy_reg_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_reg_table[i]);
/* Add delay for ECS T20 & LG malow platform, */
udelay(1);
@@ -886,18 +876,7 @@ static bool _rtl92s_phy_set_bb_to_diff_rf(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) {
- if (phy_regarray2xtxr_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray2xtxr_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray2xtxr_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray2xtxr_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray2xtxr_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray2xtxr_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray2xtxr_table[i]);
rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i],
phy_regarray2xtxr_table[i + 1],
@@ -920,18 +899,7 @@ static bool _rtl92s_phy_config_bb_with_pg(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_pg_len; i = i + 3) {
- if (phy_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_table_pg[i]);
_rtl92s_store_pwrindex_diffrate_offset(hw,
phy_table_pg[i],
@@ -1034,28 +1002,9 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radio_a_tblen; i = i + 2) {
- if (radio_a_table[i] == 0xfe)
- /* Delay specific ms. Only RF configuration
- * requires delay. */
- mdelay(50);
- else if (radio_a_table[i] == 0xfd)
- mdelay(5);
- else if (radio_a_table[i] == 0xfc)
- mdelay(1);
- else if (radio_a_table[i] == 0xfb)
- udelay(50);
- else if (radio_a_table[i] == 0xfa)
- udelay(5);
- else if (radio_a_table[i] == 0xf9)
- udelay(1);
- else
- rtl92s_phy_set_rf_reg(hw, rfpath,
- radio_a_table[i],
- MASK20BITS,
- radio_a_table[i + 1]);
+ rtl_rfreg_delay(hw, rfpath, radio_a_table[i],
+ MASK20BITS, radio_a_table[i + 1]);
- /* Add delay for ECS T20 & LG malow platform */
- udelay(1);
}
/* PA Bias current for inferiority IC */
@@ -1063,28 +1012,8 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
break;
case RF90_PATH_B:
for (i = 0; i < radio_b_tblen; i = i + 2) {
- if (radio_b_table[i] == 0xfe)
- /* Delay specific ms. Only RF configuration
- * requires delay.*/
- mdelay(50);
- else if (radio_b_table[i] == 0xfd)
- mdelay(5);
- else if (radio_b_table[i] == 0xfc)
- mdelay(1);
- else if (radio_b_table[i] == 0xfb)
- udelay(50);
- else if (radio_b_table[i] == 0xfa)
- udelay(5);
- else if (radio_b_table[i] == 0xf9)
- udelay(1);
- else
- rtl92s_phy_set_rf_reg(hw, rfpath,
- radio_b_table[i],
- MASK20BITS,
- radio_b_table[i + 1]);
-
- /* Add delay for ECS T20 & LG malow platform */
- udelay(1);
+ rtl_rfreg_delay(hw, rfpath, radio_b_table[i],
+ MASK20BITS, radio_b_table[i + 1]);
}
break;
case RF90_PATH_C:
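The phy.c rework above drops three copies of the same magic-address delay ladder and calls rtl_addr_delay() / rtl_rfreg_delay() from the driver core instead (hence the new ../core.h include). Reconstructed from the ladders removed above, the helpers can be assumed to look roughly like the sketch below; addresses 0xf9..0xfe in the config tables encode delays rather than register writes:

/* Sketch reconstructed from the deleted ladders; the real helpers are assumed
 * to live in rtlwifi's common core, so treat names and placement as such. */
void rtl_addr_delay(u32 addr)
{
        if (addr == 0xfe)
                mdelay(50);
        else if (addr == 0xfd)
                mdelay(5);
        else if (addr == 0xfc)
                mdelay(1);
        else if (addr == 0xfb)
                udelay(50);
        else if (addr == 0xfa)
                udelay(5);
        else if (addr == 0xf9)
                udelay(1);
}

void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath,
                     u32 addr, u32 mask, u32 data)
{
        if (addr >= 0xf9 && addr <= 0xfe) {
                rtl_addr_delay(addr);
        } else {
                rtl_set_rfreg(hw, rfpath, addr, mask, data);
                udelay(1);      /* ECS T20 / LG platform quirk kept from the
                                 * open-coded version */
        }
}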
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index c81c83591940..e13043479b71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -1165,16 +1165,4 @@
#define BTX_AGCRATECCK 0x7f00
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-
-#define MAKS12BITS 0xfffff
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 92d38ab3c60e..78a81c1e390b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -52,7 +52,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
/* We only care about the path A for legacy. */
if (rtlefuse->eeprom_version < 2) {
pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
- } else if (rtlefuse->eeprom_version >= 2) {
+ } else {
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
[RF90_PATH_A][chnl - 1];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 27efbcdac6a9..36b48be8329c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/* during testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -336,7 +336,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -573,7 +573,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 64dd66f287c1..5a13f17e3b41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -29,8 +29,9 @@
#ifndef __REALTEK_PCI92SE_TRX_H__
#define __REALTEK_PCI92SE_TRX_H__
-void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
- u8 *pdesc, struct ieee80211_tx_info *info,
+void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -39,7 +40,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status, u8 *pdesc,
struct sk_buff *skb);
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
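Both trx.c/trx.h pairs touched above (rtl8192de and rtl8192se) widen the same two callbacks: tx_fill_desc() gains a pbd_desc_tx argument and set_desc() gains the ieee80211_hw pointer, presumably so every rtlwifi chip driver presents one prototype to the common PCI code (newer chips keep a separate TX buffer descriptor next to the normal descriptor). A hedged usage sketch; the NULL convention for chips without buffer descriptors is an assumption, not something these hunks show:

/* Sketch: a caller on a chip without TX buffer descriptors is assumed to pass
 * NULL for the new pbd_desc_tx argument; example_fill_tx() is hypothetical. */
static void example_fill_tx(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
                            u8 *pdesc, struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, struct sk_buff *skb,
                            u8 hw_queue, struct rtl_tcb_desc *tcb)
{
        rtl92se_tx_fill_desc(hw, hdr, pdesc, NULL /* no buffer descriptor */,
                             info, sta, skb, hw_queue, tcb);
}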
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
index 4ed731f09b1f..9c34a85fdb89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -10,7 +10,6 @@ rtl8723ae-objs := \
led.o \
phy.o \
pwrseq.o \
- pwrseqcmd.o \
rf.o \
sw.o \
table.o \
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
index 8c110356dff9..debe261a7eeb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -46,11 +46,6 @@
#define E_CUT_VERSION BIT(14)
#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
-enum version_8723e {
- VERSION_TEST_UMC_CHIP_8723 = 0x0081,
- VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
- VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
-};
/* MASK */
#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index a36eee28f9e7..25cc83058b01 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -35,6 +35,7 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
+#include "../rtl8723com/dm_common.h"
#include "fw.h"
#include "hal_btc.h"
@@ -483,16 +484,6 @@ static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
}
-static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.dynamic_txpower_enable = false;
-
- rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
- rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -585,19 +576,6 @@ void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
}
}
-static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
-{
-}
-
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.current_turbo_edca = false;
- rtlpriv->dm.is_any_nonbepkts = false;
- rtlpriv->dm.is_cur_rdlstate = false;
-}
-
static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -669,9 +647,8 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
@@ -778,17 +755,6 @@ static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
}
}
-static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
- rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
- rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
- rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
- rtlpriv->dm_pstable.rssi_val_min = 0;
-}
-
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -905,11 +871,11 @@ void rtl8723ae_dm_init(struct ieee80211_hw *hw)
rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
rtl8723ae_dm_diginit(hw);
- rtl8723ae_dm_init_dynamic_txpower(hw);
- rtl8723ae_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_dynamic_txpower(hw);
+ rtl8723_dm_init_edca_turbo(hw);
rtl8723ae_dm_init_rate_adaptive_mask(hw);
rtl8723ae_dm_initialize_txpower_tracking(hw);
- rtl8723ae_dm_init_dynamic_bpowersaving(hw);
+ rtl8723_dm_init_dynamic_bb_powersaving(hw);
}
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
@@ -930,7 +896,6 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
- rtl8723ae_dm_pwdmonitor(hw);
rtl8723ae_dm_dig(hw);
rtl8723ae_dm_false_alarm_counter_statistics(hw);
rtl8723ae_dm_dynamic_bpowersaving(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index a372b0204456..d253bb53d03e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -147,7 +147,6 @@ enum dm_dig_connect_e {
void rtl8723ae_dm_init(struct ieee80211_hw *hw);
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
index ba1502b172a6..728b7563ad36 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -34,199 +34,7 @@
#include "reg.h"
#include "def.h"
#include "fw.h"
-
-static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
- if (enable) {
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
-
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
-
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
-
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
- }
-}
-
-static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
- const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 blockSize = sizeof(u32);
- u8 *bufferPtr = (u8 *) buffer;
- u32 *pu4BytePtr = (u32 *) buffer;
- u32 i, offset, blockCount, remainSize;
-
- blockCount = size / blockSize;
- remainSize = size % blockSize;
-
- for (i = 0; i < blockCount; i++) {
- offset = i * blockSize;
- rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
- }
-}
-
-static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
- u32 page, const u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 value8;
- u8 u8page = (u8) (page & 0x07);
-
- value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
- rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
- _rtl8723ae_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
- enum version_8723e version, u8 *buffer,
- u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *bufferPtr = (u8 *) buffer;
- u32 page_nums, remain_size;
- u32 page, offset;
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
-
- page_nums = size / FW_8192C_PAGE_SIZE;
- remain_size = size % FW_8192C_PAGE_SIZE;
-
- if (page_nums > 6) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Page numbers should not be greater then 6\n");
- }
-
- for (page = 0; page < page_nums; page++) {
- offset = page * FW_8192C_PAGE_SIZE;
- _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
- FW_8192C_PAGE_SIZE);
- }
-
- if (remain_size) {
- offset = page_nums * FW_8192C_PAGE_SIZE;
- page = page_nums;
- _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
- remain_size);
- }
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
-}
-
-static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int err = -EIO;
- u32 counter = 0;
- u32 value32;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_ChkSum_rpt)));
-
- if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
- value32);
- goto exit;
- }
-
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- value32 |= MCUFWDL_RDY;
- value32 &= ~WINTINI_RDY;
- rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
-
- counter = 0;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
- err = 0;
- goto exit;
- }
-
- mdelay(FW_8192C_POLLING_DELAY);
-
- } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
-
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
-
-exit:
- return err;
-}
-
-int rtl8723ae_download_fw(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl8723ae_firmware_header *pfwheader;
- u8 *pfwdata;
- u32 fwsize;
- int err;
- enum version_8723e version = rtlhal->version;
-
- if (!rtlhal->pfirmware)
- return 1;
-
- pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
- fwsize = rtlhal->fwsize;
-
- if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtl8723ae_firmware_header));
-
- pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
- fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
- }
-
- if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
- rtl8723ae_firmware_selfreset(hw);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
- }
- _rtl8723ae_enable_fw_download(hw, true);
- _rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
- _rtl8723ae_enable_fw_download(hw, false);
-
- err = _rtl8723ae_fw_free_to_go(hw);
- if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "Firmware is not ready to run!\n");
- } else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Firmware is ready to run!\n");
- }
- return 0;
-}
+#include "../rtl8723com/fw_common.h"
static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
{
@@ -463,50 +271,6 @@ void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
return;
}
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
-{
- u8 u1tmp;
- u8 delay = 100;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-
- while (u1tmp & BIT(2)) {
- delay--;
- if (delay == 0)
- break;
- udelay(50);
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- }
- if (delay == 0) {
- u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
- }
-}
-
-void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1_h2c_set_pwrmode[3] = { 0 };
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
-
- SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
- SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
- (rtlpriv->mac80211.p2p) ?
- ppsc->smart_ps : 1);
- SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
- ppsc->reg_max_lps_awakeintvl);
-
- RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
- "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
- u1_h2c_set_pwrmode, 3);
- rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-
-}
-
static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -812,7 +576,6 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
p2p_ps_offload->offload_en = 1;
-
if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
p2p_ps_offload->role = 1;
p2p_ps_offload->allstasleep = 0;
@@ -836,3 +599,24 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
}
+
+void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[3] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode,
+ (rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, 3);
+ rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
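
The three SET_H2CCMD_PWRMODE_PARM_* setters used above each fill exactly one byte of the 3-byte H2C payload (see the fw.h hunk below, where the SMART_PS setter is renamed to the _23A variant). A minimal sketch of the resulting buffer layout, written as plain byte stores instead of the SET_BITS_TO_LE_1BYTE() macros, purely for illustration:

	u1_h2c_set_pwrmode[0] = mode;				/* FW LPS mode */
	u1_h2c_set_pwrmode[1] = rtlpriv->mac80211.p2p ?
				ppsc->smart_ps : 1;		/* smart PS level */
	u1_h2c_set_pwrmode[2] = ppsc->reg_max_lps_awakeintvl;	/* beacon pass time */

rtl8723ae_fill_h2c_cmd() then pushes these three bytes to the firmware as command H2C_SETPWRMODE.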
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
index ed3b795e6980..d355b85dd9fe 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -34,7 +34,7 @@
#define FW_8192C_END_ADDRESS 0x3FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
-#define FW_8192C_POLLING_TIMEOUT_COUNT 1000
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
#define BEACON_PG 0
#define PSPOLL_PG 2
@@ -65,21 +65,9 @@ struct rtl8723ae_firmware_header {
u32 rsvd5;
};
-enum rtl8192c_h2c_cmd {
- H2C_AP_OFFLOAD = 0,
- H2C_SETPWRMODE = 1,
- H2C_JOINBSSRPT = 2,
- H2C_RSVDPAGE = 3,
- H2C_RSSI_REPORT = 4,
- H2C_P2P_PS_CTW_CMD = 5,
- H2C_P2P_PS_OFFLOAD = 6,
- H2C_RA_MASK = 7,
- MAX_H2CCMD
-};
-
#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
@@ -92,10 +80,8 @@ enum rtl8192c_h2c_cmd {
#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
-int rtl8723ae_download_fw(struct ieee80211_hw *hw);
void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 3d092e4b0b7f..48fee1be78c2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -31,6 +31,7 @@
#include "../pci.h"
#include "dm.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "../rtl8723com/fw_common.h"
#include "phy.h"
#include "reg.h"
#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
index 68c28340f791..5d534df8d90c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -30,7 +30,9 @@
#include "hal_btc.h"
#include "../pci.h"
#include "phy.h"
+#include "../rtl8723com/phy_common.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "reg.h"
#include "def.h"
@@ -391,13 +393,13 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
if (sw_dac_swing_on) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
- rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000,
- sw_dac_swing_lvl);
+ rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
+ sw_dac_swing_lvl);
rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
} else {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], SwDacSwing Off!\n");
- rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+ rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index c333dfd116b8..65c9e80e1f78 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -38,10 +38,11 @@
#include "def.h"
#include "phy.h"
#include "dm.h"
+#include "../rtl8723com/dm_common.h"
#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "led.h"
#include "hw.h"
-#include "pwrseqcmd.h"
#include "pwrseq.h"
#include "btc.h"
@@ -206,14 +207,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &e_aci);
}
break; }
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -224,7 +224,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -248,7 +248,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -272,7 +272,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
else
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -303,16 +303,15 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break; }
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
- rtl8723ae_dm_init_edca_turbo(hw);
+ u8 e_aci = *val;
+ rtl8723_dm_init_edca_turbo(hw);
- if (rtlpci->acm_method != eAcmWay2_SW)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ if (rtlpci->acm_method != EACMWAY2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+ &e_aci);
break; }
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -365,7 +364,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *) (val))[0];
break;
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = *val;
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -378,13 +377,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
@@ -393,27 +392,25 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *) val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *) val) | BIT(7)));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
}
break; }
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if (psmode != FW_PS_ACTIVE_MODE)
rtl8723ae_dm_rf_saving(hw, true);
- rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl8723ae_set_fw_pwrmode_cmd(hw, *val);
break; }
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -446,11 +443,11 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl8723ae_set_fw_joinbss_report_cmd(hw, *val);
break; }
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
- rtl8723ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+ rtl8723ae_set_p2p_ps_offload_cmd(hw, *val);
break;
case HW_VAR_AID:{
u16 u2btmp;
@@ -460,7 +457,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->assoc_id));
break; }
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = *val;
if (btype_ibss == true)
_rtl8723ae_stop_tx_beacon(hw);
@@ -490,20 +487,18 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u8 *)(&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
} else {
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *)(&rpwm_val));
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_H2C_FW_PWRMODE,
- (u8 *)(&fw_pwrmode));
+ &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_FW_PSMODE_STATUS,
@@ -880,23 +875,33 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
bool rtstatus = true;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpriv->rtlhal.being_init_adapter = true;
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe to do so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl8712e_init_mac(hw);
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
- err = rtl8723ae_download_fw(hw);
+ err = rtl8723_download_fw(hw, false);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- rtlhal->fw_ready = false;
- return err;
+ goto exit;
} else {
rtlhal->fw_ready = true;
}
@@ -971,6 +976,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl8723ae_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpriv->rtlhal.being_init_adapter = false;
return err;
}
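
Taken together, the two hw.c hunks above bracket the long-running init path with a save/enable/restore of the local irq flags. A condensed sketch of the resulting control flow, assuming the surrounding rtl8723ae_hw_init() context (most of the bring-up between firmware download and rtl8723ae_dm_init() is elided):

	int rtl8723ae_hw_init(struct ieee80211_hw *hw)
	{
		unsigned long flags;
		int err = 0;

		local_save_flags(flags);	/* remember the caller's irq state */
		local_irq_enable();		/* let other devices be serviced meanwhile */

		if (!_rtl8712e_init_mac(hw)) {	/* MAC init alone can take a long time */
			err = 1;
			goto exit;
		}
		err = rtl8723_download_fw(hw, false);
		if (err)
			goto exit;
		/* ... remaining hardware bring-up, ending with rtl8723ae_dm_init(hw) ... */
	exit:
		local_irq_restore(flags);	/* restore the caller's irq state */
		return err;
	}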
@@ -1112,12 +1119,13 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u32 reg_rcr = rtlpci->receive_config;
+ u32 reg_rcr;
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
if (check_bssid == true) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1153,7 +1161,7 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- rtl8723ae_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
@@ -1614,10 +1622,10 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1655,7 +1663,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x9185))
rtlhal->oem_id = RT_CID_TOSHIBA;
else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
CHK_SVID_SMID(0x10EC, 0x6192) ||
CHK_SVID_SMID(0x10EC, 0x6193) ||
@@ -1665,7 +1673,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x8191) ||
CHK_SVID_SMID(0x10EC, 0x8192) ||
CHK_SVID_SMID(0x10EC, 0x8193))
- rtlhal->oem_id = RT_CID_819x_SAMSUNG;
+ rtlhal->oem_id = RT_CID_819X_SAMSUNG;
else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
CHK_SVID_SMID(0x10EC, 0x9195) ||
CHK_SVID_SMID(0x10EC, 0x7194) ||
@@ -1673,24 +1681,24 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x8201) ||
CHK_SVID_SMID(0x10EC, 0x8202) ||
CHK_SVID_SMID(0x10EC, 0x9200))
- rtlhal->oem_id = RT_CID_819x_Lenovo;
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
CHK_SVID_SMID(0x10EC, 0x9196))
- rtlhal->oem_id = RT_CID_819x_CLEVO;
+ rtlhal->oem_id = RT_CID_819X_CLEVO;
else if (CHK_SVID_SMID(0x1028, 0x8194) ||
CHK_SVID_SMID(0x1028, 0x8198) ||
CHK_SVID_SMID(0x1028, 0x9197) ||
CHK_SVID_SMID(0x1028, 0x9198))
- rtlhal->oem_id = RT_CID_819x_DELL;
+ rtlhal->oem_id = RT_CID_819X_DELL;
else if (CHK_SVID_SMID(0x103C, 0x1629))
- rtlhal->oem_id = RT_CID_819x_HP;
+ rtlhal->oem_id = RT_CID_819X_HP;
else if (CHK_SVID_SMID(0x1A32, 0x2315))
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
else if (CHK_SVID_SMID(0x10EC, 0x8203))
- rtlhal->oem_id = RT_CID_819x_PRONETS;
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
else if (CHK_SVID_SMID(0x1043, 0x84B5))
rtlhal->oem_id =
- RT_CID_819x_Edimax_ASUS;
+ RT_CID_819X_EDIMAX_ASUS;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else if (rtlefuse->eeprom_did == 0x8178) {
@@ -1712,12 +1720,12 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
CHK_SVID_SMID(0x10EC, 0x9185))
rtlhal->oem_id = RT_CID_TOSHIBA;
else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819x_Acer;
+ rtlhal->oem_id = RT_CID_819X_ACER;
else if (CHK_SVID_SMID(0x10EC, 0x8186))
- rtlhal->oem_id = RT_CID_819x_PRONETS;
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
else if (CHK_SVID_SMID(0x1043, 0x8486))
rtlhal->oem_id =
- RT_CID_819x_Edimax_ASUS;
+ RT_CID_819X_EDIMAX_ASUS;
else
rtlhal->oem_id = RT_CID_DEFAULT;
} else {
@@ -1731,7 +1739,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
rtlhal->oem_id = RT_CID_CCX;
break;
case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819x_QMI;
+ rtlhal->oem_id = RT_CID_819X_QMI;
break;
case EEPROM_CID_WHQL:
break;
@@ -2037,8 +2045,7 @@ void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 sifs_timer;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
index 5d318a85eda4..3ea78afdec73 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -30,12 +30,14 @@
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
+#include "../core.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "table.h"
+#include "../rtl8723com/phy_common.h"
/* static forward definitions */
static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
@@ -43,72 +45,17 @@ static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath,
u32 offset, u32 data);
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset);
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset, u32 data);
-static u32 _phy_calculate_bit_shift(u32 bitmask);
static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid,
- u32 para1, u32 para2,
- u32 msdelay);
static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
u8 *stage, u8 *step, u32 *delay);
static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
enum wireless_mode wirelessmode,
long power_indbm);
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode, u8 txpwridx);
static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
- u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 returnvalue, originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
- returnvalue = (originalvalue & bitmask) >> bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
- originalvalue);
-
- return returnvalue;
-}
-
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 originalvalue, bitshift;
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
- bitmask, data);
-
- if (bitmask != MASKDWORD) {
- originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
- data = ((originalvalue & (~bitmask)) | (data << bitshift));
- }
-
- rtl_write_dword(rtlpriv, regaddr, data);
-
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
-}
-
u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask)
{
@@ -124,11 +71,11 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
if (rtlphy->rf_mode != RF_OP_BY_FW)
- original_value = _phy_rf_serial_read(hw, rfpath, regaddr);
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
else
original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -157,19 +104,19 @@ void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
- original_value = _phy_rf_serial_read(hw, rfpath,
- regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath,
+ regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
- _phy_rf_serial_write(hw, rfpath, regaddr, data);
+ rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
} else {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _phy_fw_rf_serial_read(hw, rfpath,
regaddr);
- bitshift = _phy_calculate_bit_shift(bitmask);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) |
(data << bitshift));
}
@@ -197,87 +144,6 @@ static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
RT_ASSERT(false, "deprecated!\n");
}
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- u32 newoffset;
- u32 tmplong, tmplong2;
- u8 rfpi_enable = 0;
- u32 retvalue;
-
- offset &= 0x3f;
- newoffset = offset;
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
- return 0xFFFFFFFF;
- }
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
- if (rfpath == RF90_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
- tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
- (newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong & (~BLSSIREADEDGE));
- mdelay(1);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- mdelay(1);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong | BLSSIREADEDGE);
- mdelay(1);
- if (rfpath == RF90_PATH_A)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- else if (rfpath == RF90_PATH_B)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
- BIT(8));
- if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
- BLSSIREADBACKDATA);
- else
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
- BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
- return retvalue;
-}
-
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset, u32 data)
-{
- u32 data_and_addr;
- u32 newoffset;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- if (RT_CANNOT_IO(hw)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
- return;
- }
- offset &= 0x3f;
- newoffset = offset;
- data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
-}
-
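
For reference, the helper removed here (now rtl8723_phy_calculate_bit_shift() in rtl8723com) simply returns the position of the lowest set bit of the mask. For example, bitmask 0x00ff0000 gives a shift of 16, so a read becomes (value & 0x00ff0000) >> 16 (bits 16 to 23 of the register) and a write shifts the new field left by 16 before merging it with the untouched bits, as in the query/set routines above.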
static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
{
rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
@@ -307,7 +173,7 @@ bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
u8 tmpu1b;
u8 reg_hwparafile = 1;
- _phy_init_bb_rf_reg_def(hw);
+ rtl8723_phy_init_bb_rf_reg_def(hw);
/* 1. 0x28[1] = 1 */
tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
@@ -412,18 +278,7 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
- if (phy_regarray_table[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
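
rtl_addr_delay() comes from ../core.h, newly included at the top of this file. It is presumably just the delay-code ladder deleted above folded into one shared helper; a minimal sketch under that assumption:

	/* Sketch only; the real helper lives in the rtlwifi core, not in this patch. */
	void rtl_addr_delay(u32 addr)
	{
		if (addr == 0xfe)
			mdelay(50);
		else if (addr == 0xfd)
			mdelay(5);
		else if (addr == 0xfc)
			mdelay(1);
		else if (addr == 0xfb)
			udelay(50);
		else if (addr == 0xfa)
			udelay(5);
		else if (addr == 0xf9)
			udelay(1);
	}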
@@ -585,18 +440,7 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
- if (phy_regarray_table_pg[i] == 0xfe)
- mdelay(50);
- else if (phy_regarray_table_pg[i] == 0xfd)
- mdelay(5);
- else if (phy_regarray_table_pg[i] == 0xfc)
- mdelay(1);
- else if (phy_regarray_table_pg[i] == 0xfb)
- udelay(50);
- else if (phy_regarray_table_pg[i] == 0xfa)
- udelay(5);
- else if (phy_regarray_table_pg[i] == 0xf9)
- udelay(1);
+ rtl_addr_delay(phy_regarray_table_pg[i]);
_st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
@@ -623,24 +467,9 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
- if (radioa_array_table[i] == 0xfe)
- mdelay(50);
- else if (radioa_array_table[i] == 0xfd)
- mdelay(5);
- else if (radioa_array_table[i] == 0xfc)
- mdelay(1);
- else if (radioa_array_table[i] == 0xfb)
- udelay(50);
- else if (radioa_array_table[i] == 0xfa)
- udelay(5);
- else if (radioa_array_table[i] == 0xf9)
- udelay(1);
- else {
- rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
- RFREG_OFFSET_MASK,
- radioa_array_table[i + 1]);
- udelay(1);
- }
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
}
break;
case RF90_PATH_B:
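
Likewise, rtl_rfreg_delay() is assumed to combine the same delay codes with the rtl_set_rfreg() write that used to sit in the else branch, so each table entry collapses to a single call. A sketch under that assumption:

	/* Sketch only; 0xf9-0xfe are delay codes, anything else is a real RF register. */
	void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath,
			     u32 addr, u32 mask, u32 data)
	{
		if (addr >= 0xf9 && addr <= 0xfe) {
			rtl_addr_delay(addr);
		} else {
			rtl_set_rfreg(hw, rfpath, addr, mask, data);
			udelay(1);
		}
	}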
@@ -690,92 +519,6 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
- rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
- RFPGA0_XA_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
- RFPGA0_XB_LSSIPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
-
- rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
-}
-
void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -785,17 +528,17 @@ void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
long txpwr_dbm;
txpwr_level = rtlphy->cur_cck_txpwridx;
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
rtlefuse->legacy_ht_txpowerdiff;
- if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
txpwr_level);
txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
- if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
txpwr_dbm)
- txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
- txpwr_level);
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
*powerlevel = txpwr_dbm;
}
@@ -912,28 +655,6 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
return txpwridx;
}
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
- enum wireless_mode wirelessmode, u8 txpwridx)
-{
- long offset;
- long pwrout_dbm;
-
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- offset = -8;
- break;
- default:
- offset = -8;
- break;
- }
- pwrout_dbm = txpwridx / 2 + offset;
- return pwrout_dbm;
-}
-
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1117,26 +838,26 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
- _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
- 0, 0, 0);
- _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
- MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
+ 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
- _phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
- MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
RT_ASSERT((channel >= 1 && channel <= 14),
"illegal channel for Zebra: %d\n", channel);
- _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
RF_CHNLBW, channel, 10);
- _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
- MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
@@ -1204,29 +925,6 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
return false;
}
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
- u32 cmdtableidx, u32 cmdtablesz,
- enum swchnlcmd_id cmdid, u32 para1,
- u32 para2, u32 msdelay)
-{
- struct swchnlcmd *pcmd;
-
- if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
- return false;
- }
-
- if (cmdtableidx >= cmdtablesz)
- return false;
-
- pcmd = cmdtable + cmdtableidx;
- pcmd->cmdid = cmdid;
- pcmd->para1 = para1;
- pcmd->para2 = para2;
- pcmd->msdelay = msdelay;
- return true;
-}
-
static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
@@ -1297,136 +995,6 @@ static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
return result;
}
-static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
- long result[][8], u8 final_candidate,
- bool btxonly)
-{
- u32 oldval_0, x, tx0_a, reg;
- long y, tx0_c;
-
- if (final_candidate == 0xFF) {
- return;
- } else if (iqk_ok) {
- oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD) >> 22) & 0x3FF;
- x = result[final_candidate][0];
- if ((x & 0x00000200) != 0)
- x = x | 0xFFFFFC00;
- tx0_a = (x * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
- ((x * oldval_0 >> 7) & 0x1));
- y = result[final_candidate][1];
- if ((y & 0x00000200) != 0)
- y = y | 0xFFFFFC00;
- tx0_c = (y * oldval_0) >> 8;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
- ((tx0_c & 0x3C0) >> 6));
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
- (tx0_c & 0x3F));
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
- ((y * oldval_0 >> 7) & 0x1));
- if (btxonly)
- return;
- reg = result[final_candidate][2];
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
- reg = result[final_candidate][3] & 0x3F;
- rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
- reg = (result[final_candidate][3] >> 6) & 0xF;
- rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
- }
-}
-
-static void phy_save_adda_regs(struct ieee80211_hw *hw,
- u32 *addareg, u32 *addabackup,
- u32 registernum)
-{
- u32 i;
-
- for (i = 0; i < registernum; i++)
- addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
- u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
- macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
- u32 *addabackup, u32 regiesternum)
-{
- u32 i;
-
- for (i = 0; i < regiesternum; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
- u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
- rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
-
-static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
- u32 *addareg, bool is_patha_on,
- bool is2t)
-{
- u32 pathOn;
- u32 i;
-
- pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
- pathOn = 0x0bdb25a0;
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
- } else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
- }
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i = 0;
-
- rtl_write_byte(rtlpriv, macreg[i], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i],
- (u8) (macbackup[i] & (~BIT(3))));
- rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
-{
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
- rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
- rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
- u32 mode;
-
- mode = pi_mode ? 0x01000100 : 0x01000000;
- rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
- rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
u8 c1, u8 c2)
{
@@ -1498,10 +1066,12 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
const u32 retrycount = 2;
if (t == 0) {
- phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
- phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+ rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup,
+ 16);
+ rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
}
- _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0) {
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1,
@@ -1509,7 +1079,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
}
if (!rtlphy->rfpi_enable)
- _rtl8723ae_phy_pi_mode_switch(hw, true);
+ rtl8723_phy_pi_mode_switch(hw, true);
if (t == 0) {
rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
@@ -1522,7 +1092,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
}
- _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
rtlphy->iqk_mac_backup);
rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
if (is2t)
@@ -1552,8 +1122,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
}
if (is2t) {
- _rtl8723ae_phy_path_a_standby(hw);
- _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl8723_phy_path_a_standby(hw);
+ rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
if (pathb_ok == 0x03) {
@@ -1588,9 +1158,11 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
if (t != 0) {
if (!rtlphy->rfpi_enable)
- _rtl8723ae_phy_pi_mode_switch(hw, false);
- phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
- phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+ rtl8723_phy_pi_mode_switch(hw, false);
+ rtl8723_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
}
}
@@ -1691,7 +1263,8 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
};
if (recovery) {
- phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
return;
}
if (start_conttx || singletone)
@@ -1756,9 +1329,10 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
}
if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
- phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
- final_candidate, (reg_ea4 == 0));
- phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+ rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
}
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
index 007ebdbbe108..cd43139ed332 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -76,23 +76,6 @@
#define RTL92C_MAX_PATH_NUM 2
-enum swchnlcmd_id {
- CMDID_END,
- CMDID_SET_TXPOWEROWER_LEVEL,
- CMDID_BBREGWRITE10,
- CMDID_WRITEPORT_ULONG,
- CMDID_WRITEPORT_USHORT,
- CMDID_WRITEPORT_UCHAR,
- CMDID_RF_WRITEREG,
-};
-
-struct swchnlcmd {
- enum swchnlcmd_id cmdid;
- u32 para1;
- u32 para2;
- u32 msdelay;
-};
-
enum hw90_block_e {
HW90_BLOCK_MAC = 0,
HW90_BLOCK_PHY0 = 1,
@@ -183,10 +166,6 @@ struct tx_power_struct {
u32 mcs_original_offset[4][16];
};
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask);
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask, u32 data);
u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr,
u32 bitmask);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
index 7a46f9fdf558..a418acb4d0ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
@@ -30,7 +30,6 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
/*
Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
There are 6 HW Power States:
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
index 199da366c6da..64376b38708b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
@@ -2059,22 +2059,6 @@
#define BWORD1 0xc
#define BWORD 0xf
-#define MASKBYTE0 0xff
-#define MASKBYTE1 0xff00
-#define MASKBYTE2 0xff0000
-#define MASKBYTE3 0xff000000
-#define MASKHWORD 0xffff0000
-#define MASKLWORD 0x0000ffff
-#define MASKDWORD 0xffffffff
-#define MASK12BITS 0xfff
-#define MASKH4BITS 0xf0000000
-#define MASKOFDM_D 0xffc00000
-#define MASKCCK 0x3f3f3f3f
-
-#define MASK4BITS 0x0f
-#define MASK20BITS 0xfffff
-#define RFREG_OFFSET_MASK 0xfffff
-
#define BENABLE 0x1
#define BDISABLE 0x0
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 62b204faf773..1087a3bd07fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -37,8 +37,11 @@
#include "reg.h"
#include "def.h"
#include "phy.h"
+#include "../rtl8723com/phy_common.h"
#include "dm.h"
#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
#include "sw.h"
#include "trx.h"
#include "led.h"
@@ -193,6 +196,11 @@ void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
}
}
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+ return (hdr->signature & 0xfff0) == 0x2300;
+}
+
static struct rtl_hal_ops rtl8723ae_hal_ops = {
.init_sw_vars = rtl8723ae_init_sw_vars,
.deinit_sw_vars = rtl8723ae_deinit_sw_vars,
@@ -231,13 +239,14 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.set_key = rtl8723ae_set_key,
.init_sw_leds = rtl8723ae_init_sw_leds,
.allow_all_destaddr = rtl8723ae_allow_all_destaddr,
- .get_bbreg = rtl8723ae_phy_query_bb_reg,
- .set_bbreg = rtl8723ae_phy_set_bb_reg,
+ .get_bbreg = rtl8723_phy_query_bb_reg,
+ .set_bbreg = rtl8723_phy_set_bb_reg,
.get_rfreg = rtl8723ae_phy_query_rf_reg,
.set_rfreg = rtl8723ae_phy_set_rf_reg,
.c2h_command_handle = rtl_8723e_c2h_command_handle,
.bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
.bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
+ .is_fw_header = is_fw_header,
};
static struct rtl_mod_params rtl8723ae_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 50b7be3f3a60..10b7577b6ae5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/* during testing, hdr could be NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
@@ -365,7 +365,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcdesc)
@@ -375,7 +375,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -577,7 +577,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -597,7 +597,8 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
pdesc, TX_DESC_SIZE);
}
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
{
if (istx == true) {
switch (desc_name) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
index ad05b54bc0f1..4380b7d3a91a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
@@ -521,12 +521,6 @@ do { \
memset(__pdesc, 0, _size); \
} while (0)
-#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
- ((rxmcs) == DESC92_RATE1M || \
- (rxmcs) == DESC92_RATE2M || \
- (rxmcs) == DESC92_RATE5_5M || \
- (rxmcs) == DESC92_RATE11M)
-
struct rx_fwinfo_8723e {
u8 gain_trsw[4];
u8 pwdb_all;
@@ -706,8 +700,8 @@ struct rx_desc_8723e {
} __packed;
void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
@@ -715,7 +709,8 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
u8 *pdesc, struct sk_buff *skb);
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
new file mode 100644
index 000000000000..59e416abd93a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
@@ -0,0 +1,19 @@
+obj-m := rtl8723be.o
+
+
+rtl8723be-objs := \
+ dm.o \
+ fw.o \
+ hw.o \
+ led.o \
+ phy.o \
+ pwrseq.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o \
+
+
+obj-$(CONFIG_RTL8723BE) += rtl8723be.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
new file mode 100644
index 000000000000..3c30b74e983d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_DEF_H__
+#define __RTL8723BE_DEF_H__
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+#define AC2QUEUEID(_AC) (_AC)
+
+#define C2H_RX_CMD_HDR_LEN 8
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
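
A hypothetical use of the C2H accessors above, showing how the little-endian command header is unpacked (the skb source and variable names are illustrative, not part of this patch):

	u8 *hdr = skb->data;				/* start of a received C2H packet */
	u16 cmd_len = GET_C2H_CMD_CMD_LEN(hdr);		/* bits 0-15 of the first LE dword */
	u8 elem_id = GET_C2H_CMD_ELEMENT_ID(hdr);	/* bits 16-23 */
	u8 cmd_seq = GET_C2H_CMD_CMD_SEQ(hdr);		/* bits 24-30 */
	u8 *payload = GET_C2H_CMD_CONTENT(hdr);		/* hdr + C2H_RX_CMD_HDR_LEN (8 bytes) */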
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+
+#define CHIP_8723 BIT(0)
+#define CHIP_8723B (BIT(1) | BIT(2))
+#define NORMAL_CHIP BIT(3)
+#define RF_TYPE_1T1R (~(BIT(4) | BIT(5) | BIT(6)))
+#define RF_TYPE_1T2R BIT(4)
+#define RF_TYPE_2T2R BIT(5)
+#define CHIP_VENDOR_UMC BIT(7)
+#define B_CUT_VERSION BIT(12)
+#define C_CUT_VERSION BIT(13)
+#define D_CUT_VERSION ((BIT(12) | BIT(13)))
+#define E_CUT_VERSION BIT(14)
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* MASK */
+#define IC_TYPE_MASK (BIT(0) | BIT(1) | BIT(2))
+#define CHIP_TYPE_MASK BIT(3)
+#define RF_TYPE_MASK (BIT(4) | BIT(5) | BIT(6))
+#define MANUFACTUER_MASK BIT(7)
+#define ROM_VERSION_MASK (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define CUT_VERSION_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
+
+#define IS_92C_SERIAL(version) ((IS_81XXC(version) && IS_2T2R(version)) ?\
+ true : false)
+#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\
+ true : false)
+#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\
+ true : false)
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+ ? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+ ? true : false)
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rf_power_state {
+ RF_ON,
+ RF_OFF,
+ RF_SLEEP,
+ RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+ POWERCFG_MAX_POWER_SAVINGS,
+ POWERCFG_GLOBAL_POWER_SAVINGS,
+ POWERCFG_LOCAL_POWER_SAVINGS,
+ POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8723e_rate {
+ DESC92C_RATE1M = 0x00,
+ DESC92C_RATE2M = 0x01,
+ DESC92C_RATE5_5M = 0x02,
+ DESC92C_RATE11M = 0x03,
+
+ DESC92C_RATE6M = 0x04,
+ DESC92C_RATE9M = 0x05,
+ DESC92C_RATE12M = 0x06,
+ DESC92C_RATE18M = 0x07,
+ DESC92C_RATE24M = 0x08,
+ DESC92C_RATE36M = 0x09,
+ DESC92C_RATE48M = 0x0a,
+ DESC92C_RATE54M = 0x0b,
+
+ DESC92C_RATEMCS0 = 0x0c,
+ DESC92C_RATEMCS1 = 0x0d,
+ DESC92C_RATEMCS2 = 0x0e,
+ DESC92C_RATEMCS3 = 0x0f,
+ DESC92C_RATEMCS4 = 0x10,
+ DESC92C_RATEMCS5 = 0x11,
+ DESC92C_RATEMCS6 = 0x12,
+ DESC92C_RATEMCS7 = 0x13,
+ DESC92C_RATEMCS8 = 0x14,
+ DESC92C_RATEMCS9 = 0x15,
+ DESC92C_RATEMCS10 = 0x16,
+ DESC92C_RATEMCS11 = 0x17,
+ DESC92C_RATEMCS12 = 0x18,
+ DESC92C_RATEMCS13 = 0x19,
+ DESC92C_RATEMCS14 = 0x1a,
+ DESC92C_RATEMCS15 = 0x1b,
+ DESC92C_RATEMCS15_SG = 0x1c,
+ DESC92C_RATEMCS32 = 0x20,
+};
+
+enum rx_packet_type {
+ NORMAL_RX,
+ TX_REPORT1,
+ TX_REPORT2,
+ HIS_REPORT,
+};
+
+struct phy_sts_cck_8723e_t {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8723e {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
new file mode 100644
index 000000000000..13d53a1df789
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -0,0 +1,1325 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+static const u32 ofdmswing_table[] = {
+ 0x0b40002d, /* 0, -15.0dB */
+ 0x0c000030, /* 1, -14.5dB */
+ 0x0cc00033, /* 2, -14.0dB */
+ 0x0d800036, /* 3, -13.5dB */
+ 0x0e400039, /* 4, -13.0dB */
+ 0x0f00003c, /* 5, -12.5dB */
+ 0x10000040, /* 6, -12.0dB */
+ 0x11000044, /* 7, -11.5dB */
+ 0x12000048, /* 8, -11.0dB */
+ 0x1300004c, /* 9, -10.5dB */
+ 0x14400051, /* 10, -10.0dB */
+ 0x15800056, /* 11, -9.5dB */
+ 0x16c0005b, /* 12, -9.0dB */
+ 0x18000060, /* 13, -8.5dB */
+ 0x19800066, /* 14, -8.0dB */
+ 0x1b00006c, /* 15, -7.5dB */
+ 0x1c800072, /* 16, -7.0dB */
+ 0x1e400079, /* 17, -6.5dB */
+ 0x20000080, /* 18, -6.0dB */
+ 0x22000088, /* 19, -5.5dB */
+ 0x24000090, /* 20, -5.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x288000a2, /* 22, -4.0dB */
+ 0x2ac000ab, /* 23, -3.5dB */
+ 0x2d4000b5, /* 24, -3.0dB */
+ 0x300000c0, /* 25, -2.5dB */
+ 0x32c000cb, /* 26, -2.0dB */
+ 0x35c000d7, /* 27, -1.5dB */
+ 0x390000e4, /* 28, -1.0dB */
+ 0x3c8000f2, /* 29, -0.5dB */
+ 0x40000100, /* 30, +0dB */
+ 0x43c0010f, /* 31, +0.5dB */
+ 0x47c0011f, /* 32, +1.0dB */
+ 0x4c000130, /* 33, +1.5dB */
+ 0x50800142, /* 34, +2.0dB */
+ 0x55400155, /* 35, +2.5dB */
+ 0x5a400169, /* 36, +3.0dB */
+ 0x5fc0017f, /* 37, +3.5dB */
+ 0x65400195, /* 38, +4.0dB */
+ 0x6b8001ae, /* 39, +4.5dB */
+ 0x71c001c7, /* 40, +5.0dB */
+ 0x788001e2, /* 41, +5.5dB */
+ 0x7f8001fe /* 42, +6.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
+};
+
+static const u32 edca_setting_dl[PEER_MAX] = {
+ 0xa44f, /* 0 UNKNOWN */
+ 0x5ea44f, /* 1 REALTEK_90 */
+ 0x5e4322, /* 2 REALTEK_92SE */
+ 0x5ea42b, /* 3 BROAD */
+ 0xa44f, /* 4 RAL */
+ 0xa630, /* 5 ATH */
+ 0x5ea630, /* 6 CISCO */
+ 0x5ea42b, /* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+ 0x5e4322, /* 0 UNKNOWN */
+ 0xa44f, /* 1 REALTEK_90 */
+ 0x5ea44f, /* 2 REALTEK_92SE */
+ 0x5ea32b, /* 3 BROAD */
+ 0x5ea422, /* 4 RAL */
+ 0x5ea322, /* 5 ATH */
+ 0x3ea430, /* 6 CISCO */
+	0x5ea44f, /* 7 MARVELL */
+};
+
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+ u8 *pdirection, u32 *poutwrite_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 pwr_val = 0;
+ u8 ofdm_base = rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A];
+ u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
+ u8 cck_base = rtldm->swing_idx_cck_base;
+ u8 cck_val = rtldm->swing_idx_cck;
+
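+	/* type 0 compares the OFDM swing index against its base, type 1 the
+	 * CCK swing index; direction 1 means at or below base, 2 means above.
+	 */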
+ if (type == 0) {
+ if (ofdm_val <= ofdm_base) {
+ *pdirection = 1;
+ pwr_val = ofdm_base - ofdm_val;
+ } else {
+ *pdirection = 2;
+ pwr_val = ofdm_val - ofdm_base;
+ }
+ } else if (type == 1) {
+ if (cck_val <= cck_base) {
+ *pdirection = 1;
+ pwr_val = cck_base - cck_val;
+ } else {
+ *pdirection = 2;
+ pwr_val = cck_val - cck_base;
+ }
+ }
+
+ if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+ pwr_val = TXPWRTRACK_MAX_IDX;
+
+ *poutwrite_val = pwr_val | (pwr_val << 8) |
+ (pwr_val << 16) | (pwr_val << 24);
+}
+
+static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+ dm_digtable->dig_enable_flag = true;
+ dm_digtable->cur_igvalue = rtl_get_bbreg(hw,
+ ROFDM0_XAAGCCORE1, 0x7f);
+ dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable->rx_gain_max = DM_DIG_MAX;
+ dm_digtable->rx_gain_min = DM_DIG_MIN;
+ dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable->pre_cck_cca_thres = 0xff;
+ dm_digtable->cur_cck_cca_thres = 0x83;
+ dm_digtable->forbidden_igi = DM_DIG_MIN;
+ dm_digtable->large_fa_hit = 0;
+ dm_digtable->recover_cnt = 0;
+ dm_digtable->dig_min_0 = DM_DIG_MIN;
+ dm_digtable->dig_min_1 = DM_DIG_MIN;
+ dm_digtable->media_connect_0 = false;
+ dm_digtable->media_connect_1 = false;
+ rtlpriv->dm.dm_initialgain_enable = true;
+ dm_digtable->bt30_cur_igi = 0x32;
+}
+
+void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *ra = &(rtlpriv->ra);
+
+ ra->ratr_state = DM_RATR_STA_INIT;
+ ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+
+ ra->high_rssi_thresh_for_ra = 50;
+ ra->low_rssi_thresh_for_ra40m = 20;
+}
+
+static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_track_control = true;
+ rtlpriv->dm.thermalvalue = 0;
+
+ rtlpriv->dm.ofdm_index[0] = 30;
+ rtlpriv->dm.cck_index = 20;
+
+ rtlpriv->dm.swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+ rtlpriv->dm.swing_idx_ofdm_base[0] = rtlpriv->dm.ofdm_index[0];
+ rtlpriv->dm.delta_power_index[RF90_PATH_A] = 0;
+ rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
+ rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ " rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
+}
+
+static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+ rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800);
+ rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+void rtl8723be_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl8723be_dm_diginit(hw);
+ rtl8723be_dm_init_rate_adaptive_mask(hw);
+ rtl8723_dm_init_edca_turbo(hw);
+ rtl8723_dm_init_dynamic_bb_powersaving(hw);
+ rtl8723_dm_init_dynamic_txpower(hw);
+ rtl8723be_dm_init_txpower_tracking(hw);
+ rtl8723be_dm_init_dynamic_atc_switch(hw);
+}
+
+static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
+ rtl_dm_dig->min_undec_pwdb_for_dm = 0;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
+ } else {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+ }
+ } else {
+ rtl_dm_dig->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+				 "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+ }
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
+}
+
+static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 h2c_parameter[3] = { 0 };
+ long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+ /* AP & ADHOC & MESH */
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ if (drv_priv->rssi_stat.undec_sm_pwdb <
+ tmp_entry_min_pwdb)
+ tmp_entry_min_pwdb =
+ drv_priv->rssi_stat.undec_sm_pwdb;
+ if (drv_priv->rssi_stat.undec_sm_pwdb >
+ tmp_entry_max_pwdb)
+ tmp_entry_max_pwdb =
+ drv_priv->rssi_stat.undec_sm_pwdb;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ /* If associated entry is found */
+ if (tmp_entry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "EntryMaxPWDB = 0x%lx(%ld)\n",
+ tmp_entry_max_pwdb, tmp_entry_max_pwdb);
+ } else {
+ rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+ }
+ /* If associated entry is found */
+ if (tmp_entry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "EntryMinPWDB = 0x%lx(%ld)\n",
+ tmp_entry_min_pwdb, tmp_entry_min_pwdb);
+ } else {
+ rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+ }
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.useramask) {
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
+ h2c_parameter[1] = 0x20;
+ h2c_parameter[0] = 0;
+ rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
+ }
+ rtl8723be_dm_find_minimum_rssi(hw);
+ rtlpriv->dm_digtable.rssi_val_min =
+ rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
+}
+
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm_digtable.cur_igvalue != current_igi) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi);
+ if (rtlpriv->phy.rf_type != RF_1T1R)
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi);
+ }
+ rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue;
+ rtlpriv->dm_digtable.cur_igvalue = current_igi;
+}
+
+static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct dig_t *dm_digtable = &(rtlpriv->dm_digtable);
+ u8 dig_dynamic_min, dig_maxofmin;
+ bool firstconnect, firstdisconnect;
+ u8 dm_dig_max, dm_dig_min;
+ u8 current_igi = dm_digtable->cur_igvalue;
+ u8 offset;
+
+ /* AP, BT */
+ if (mac->act_scanning)
+ return;
+
+ dig_dynamic_min = dm_digtable->dig_min_0;
+ firstconnect = (mac->link_state >= MAC80211_LINKED) &&
+ !dm_digtable->media_connect_0;
+ firstdisconnect = (mac->link_state < MAC80211_LINKED) &&
+ dm_digtable->media_connect_0;
+
+ dm_dig_max = 0x5a;
+ dm_dig_min = DM_DIG_MIN;
+ dig_maxofmin = DM_DIG_MAX_AP;
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if ((dm_digtable->rssi_val_min + 10) > dm_dig_max)
+ dm_digtable->rx_gain_max = dm_dig_max;
+ else if ((dm_digtable->rssi_val_min + 10) < dm_dig_min)
+ dm_digtable->rx_gain_max = dm_dig_min;
+ else
+ dm_digtable->rx_gain_max =
+ dm_digtable->rssi_val_min + 10;
+
+ if (rtlpriv->dm.one_entry_only) {
+ offset = 12;
+ if (dm_digtable->rssi_val_min - offset < dm_dig_min)
+ dig_dynamic_min = dm_dig_min;
+ else if (dm_digtable->rssi_val_min - offset >
+ dig_maxofmin)
+ dig_dynamic_min = dig_maxofmin;
+ else
+ dig_dynamic_min =
+ dm_digtable->rssi_val_min - offset;
+ } else {
+ dig_dynamic_min = dm_dig_min;
+ }
+ } else {
+ dm_digtable->rx_gain_max = dm_dig_max;
+ dig_dynamic_min = dm_dig_min;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ }
+
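+	/* Burst false-alarm handling: after three hits above 10000, raise
+	 * the IGI floor just above the offending value and hold it there
+	 * for up to 3600 watchdog periods.
+	 */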
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+ if (dm_digtable->large_fa_hit != 3)
+ dm_digtable->large_fa_hit++;
+ if (dm_digtable->forbidden_igi < current_igi) {
+ dm_digtable->forbidden_igi = current_igi;
+ dm_digtable->large_fa_hit = 1;
+ }
+
+ if (dm_digtable->large_fa_hit >= 3) {
+ if ((dm_digtable->forbidden_igi + 1) >
+ dm_digtable->rx_gain_max)
+ dm_digtable->rx_gain_min =
+ dm_digtable->rx_gain_max;
+ else
+ dm_digtable->rx_gain_min =
+ dm_digtable->forbidden_igi + 1;
+ dm_digtable->recover_cnt = 3600;
+ }
+ } else {
+ if (dm_digtable->recover_cnt != 0) {
+ dm_digtable->recover_cnt--;
+ } else {
+ if (dm_digtable->large_fa_hit < 3) {
+ if ((dm_digtable->forbidden_igi - 1) <
+ dig_dynamic_min) {
+ dm_digtable->forbidden_igi =
+ dig_dynamic_min;
+ dm_digtable->rx_gain_min =
+ dig_dynamic_min;
+ } else {
+ dm_digtable->forbidden_igi--;
+ dm_digtable->rx_gain_min =
+ dm_digtable->forbidden_igi + 1;
+ }
+ } else {
+ dm_digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max)
+ dm_digtable->rx_gain_min = dm_digtable->rx_gain_max;
+
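+	/* Choose the new IGI: seed it from RSSI on the first connect,
+	 * otherwise step it by the false-alarm count, then clamp it to the
+	 * [rx_gain_min, rx_gain_max] window below.
+	 */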
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (firstconnect) {
+ if (dm_digtable->rssi_val_min <= dig_maxofmin)
+ current_igi = dm_digtable->rssi_val_min;
+ else
+ current_igi = dig_maxofmin;
+
+ dm_digtable->large_fa_hit = 0;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+ current_igi += 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+ current_igi += 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ current_igi -= 2;
+ }
+ } else {
+ if (firstdisconnect) {
+ current_igi = dm_digtable->rx_gain_min;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+ current_igi += 4;
+ else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
+ current_igi += 2;
+ else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+ current_igi -= 2;
+ }
+ }
+
+ if (current_igi > dm_digtable->rx_gain_max)
+ current_igi = dm_digtable->rx_gain_max;
+ else if (current_igi < dm_digtable->rx_gain_min)
+ current_igi = dm_digtable->rx_gain_min;
+
+ rtl8723be_dm_write_dig(hw, current_igi);
+ dm_digtable->media_connect_0 =
+ ((mac->link_state >= MAC80211_LINKED) ? true : false);
+ dm_digtable->dig_min_0 = dig_dynamic_min;
+}
+
+static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ u32 ret_value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
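+	/* Hold the OFDM false-alarm counters while they are read out. */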
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1);
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
+ falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD);
+ falsealm_cnt->cnt_ofdm_cca = ret_value & 0xffff;
+ falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
+ falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;
+
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1);
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_RST_11N, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+
+ ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD);
+ falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) |
+ ((ret_value & 0xff00) >> 8);
+
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail +
+ falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_cck_fail;
+
+ falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca +
+ falsealm_cnt->cnt_cck_cca;
+
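+	/* Reset the counters and release the holds so that counting
+	 * restarts for the next watchdog period.
+	 */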
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0);
+
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0);
+ rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0);
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2);
+
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
+ rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+ "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x,"
+ " cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
+}
+
+static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+	/* 8723BE does not support ODM_BB_DYNAMIC_TXPWR */
+ return;
+}
+
+static void rtl8723be_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index,
+ u8 rfpath, long iqk_result_x,
+ long iqk_result_y)
+{
+ long ele_a = 0, ele_d, ele_c = 0, value32;
+
+ if (ofdm_index >= 43)
+ ofdm_index = 43 - 1;
+
+ ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000) >> 22;
+
+ if (iqk_result_x != 0) {
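+		/* Sign-extend the 10-bit IQK results before scaling by ele_d. */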
+ if ((iqk_result_x & 0x00000200) != 0)
+ iqk_result_x = iqk_result_x | 0xFFFFFC00;
+ ele_a = ((iqk_result_x * ele_d) >> 8) & 0x000003FF;
+
+ if ((iqk_result_y & 0x00000200) != 0)
+ iqk_result_y = iqk_result_y | 0xFFFFFC00;
+ ele_c = ((iqk_result_y * ele_d) >> 8) & 0x000003FF;
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ value32);
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
+ value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ value32);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ ofdmswing_table[ofdm_index]);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw,
+ enum pwr_track_control_method method,
+ u8 rfpath, u8 idx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 swing_idx_ofdm_limit = 36;
+
+ if (method == TXAGC) {
+ rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+ } else if (method == BBSWING) {
+ if (rtldm->swing_idx_cck >= CCK_TABLE_SIZE)
+ rtldm->swing_idx_cck = CCK_TABLE_SIZE - 1;
+
+ if (!rtldm->cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13[rtldm->swing_idx_cck][7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14[rtldm->swing_idx_cck][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14[rtldm->swing_idx_cck][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14[rtldm->swing_idx_cck][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14[rtldm->swing_idx_cck][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14[rtldm->swing_idx_cck][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14[rtldm->swing_idx_cck][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14[rtldm->swing_idx_cck][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14[rtldm->swing_idx_cck][7]);
+ }
+
+ if (rfpath == RF90_PATH_A) {
+ if (rtldm->swing_idx_ofdm[RF90_PATH_A] <
+ swing_idx_ofdm_limit)
+ swing_idx_ofdm_limit =
+ rtldm->swing_idx_ofdm[RF90_PATH_A];
+
+ rtl8723be_set_iqk_matrix(hw,
+ rtldm->swing_idx_ofdm[rfpath], rfpath,
+ rtlphy->iqk_matrix[idx].value[0][0],
+ rtlphy->iqk_matrix[idx].value[0][1]);
+ } else if (rfpath == RF90_PATH_B) {
+ if (rtldm->swing_idx_ofdm[RF90_PATH_B] <
+ swing_idx_ofdm_limit)
+ swing_idx_ofdm_limit =
+ rtldm->swing_idx_ofdm[RF90_PATH_B];
+
+ rtl8723be_set_iqk_matrix(hw,
+ rtldm->swing_idx_ofdm[rfpath], rfpath,
+ rtlphy->iqk_matrix[idx].value[0][4],
+ rtlphy->iqk_matrix[idx].value[0][5]);
+ }
+ } else {
+ return;
+ }
+}
+
+static void txpwr_track_cb_therm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 thermalvalue = 0, delta, delta_lck, delta_iqk;
+ u8 thermalvalue_avg_count = 0;
+ u32 thermalvalue_avg = 0;
+ int i = 0;
+
+ u8 ofdm_min_index = 6;
+ u8 index = 0;
+
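+	/* BB swing index deltas indexed by thermal delta, for temperature
+	 * going up (tup) and down (tdown) on RF path A.
+	 */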
+ char delta_swing_table_idx_tup_a[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 14, 15};
+ char delta_swing_table_idx_tdown_a[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 12, 13, 14, 15};
+
+	/* Initialization (7 steps in total) */
+ rtlpriv->dm.txpower_trackinginit = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtl8723be_dm_txpower_tracking"
+ "_callback_thermalmeter\n");
+
+ thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00);
+ if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
+ rtlefuse->eeprom_thermalmeter == 0xFF)
+ return;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtldm->thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
+ /*3 Initialize ThermalValues of RFCalibrateInfo*/
+ if (!rtldm->thermalvalue) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ }
+
+ /*4 Calculate average thermal meter*/
+ rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
+ rtldm->thermalvalue_avg_index++;
+ if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8723BE)
+ rtldm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_8723BE; i++) {
+ if (rtldm->thermalvalue_avg[i]) {
+ thermalvalue_avg += rtldm->thermalvalue_avg[i];
+ thermalvalue_avg_count++;
+ }
+ }
+
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+
+ /* 5 Calculate delta, delta_LCK, delta_IQK.*/
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x delta 0x%x "
+ "delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
+ /* 6 If necessary, do LCK.*/
+ if (delta_lck >= IQK_THRESHOLD) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl8723be_phy_lc_calibrate(hw);
+ }
+
+ /* 7 If necessary, move the index of
+ * swing table to adjust Tx power.
+ */
+ if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+
+ if (delta >= TXSCALE_TABLE_SIZE)
+ delta = TXSCALE_TABLE_SIZE - 1;
+ /* 7.1 Get the final CCK_index and
+ * OFDM_index for each swing table.
+ */
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ rtldm->delta_power_index_last[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] =
+ delta_swing_table_idx_tup_a[delta];
+ } else {
+ rtldm->delta_power_index_last[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A];
+ rtldm->delta_power_index[RF90_PATH_A] =
+ -1 * delta_swing_table_idx_tdown_a[delta];
+ }
+
+ /* 7.2 Handle boundary conditions of index.*/
+ if (rtldm->delta_power_index[RF90_PATH_A] ==
+ rtldm->delta_power_index_last[RF90_PATH_A])
+ rtldm->power_index_offset[RF90_PATH_A] = 0;
+ else
+ rtldm->power_index_offset[RF90_PATH_A] =
+ rtldm->delta_power_index[RF90_PATH_A] -
+ rtldm->delta_power_index_last[RF90_PATH_A];
+
+ rtldm->ofdm_index[0] =
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A] +
+ rtldm->power_index_offset[RF90_PATH_A];
+ rtldm->cck_index = rtldm->swing_idx_cck_base +
+ rtldm->power_index_offset[RF90_PATH_A];
+
+ rtldm->swing_idx_cck = rtldm->cck_index;
+ rtldm->swing_idx_ofdm[0] = rtldm->ofdm_index[0];
+
+ if (rtldm->ofdm_index[0] > OFDM_TABLE_SIZE - 1)
+ rtldm->ofdm_index[0] = OFDM_TABLE_SIZE - 1;
+ else if (rtldm->ofdm_index[0] < ofdm_min_index)
+ rtldm->ofdm_index[0] = ofdm_min_index;
+
+ if (rtldm->cck_index > CCK_TABLE_SIZE - 1)
+ rtldm->cck_index = CCK_TABLE_SIZE - 1;
+ else if (rtldm->cck_index < 0)
+ rtldm->cck_index = 0;
+ } else {
+ rtldm->power_index_offset[RF90_PATH_A] = 0;
+ }
+
+ if ((rtldm->power_index_offset[RF90_PATH_A] != 0) &&
+ (rtldm->txpower_track_control)) {
+ rtldm->done_txpower = true;
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter)
+ rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
+ index);
+ else
+ rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
+ index);
+
+ rtldm->swing_idx_cck_base = rtldm->swing_idx_cck;
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A] =
+ rtldm->swing_idx_ofdm[0];
+ rtldm->thermalvalue = thermalvalue;
+ }
+
+ if (delta_iqk >= IQK_THRESHOLD) {
+ rtldm->thermalvalue_iqk = thermalvalue;
+ rtl8723be_phy_iq_calibrate(hw, false);
+ }
+
+ rtldm->txpowercount = 0;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+}
+
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
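+	/* Alternate watchdog passes: first trigger the RF thermal meter,
+	 * then read it back and run the tracking callback on the next pass.
+	 */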
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
+ 0x03);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8723be Thermal Meter!!\n");
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
+ txpwr_track_cb_therm(hw);
+ tm_trigger = 0;
+ }
+}
+
+static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *ra = &(rtlpriv->ra);
+ struct ieee80211_sta *sta = NULL;
+ u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m;
+ u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra;
+ u8 go_up_gap = 5;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
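+		/* Hysteresis: when the previous state was lower, the RSSI
+		 * must clear the threshold by go_up_gap before moving up.
+		 */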
+ switch (ra->pre_ratr_state) {
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra += go_up_gap;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra += go_up_gap;
+ low_rssithresh_for_ra += go_up_gap;
+ break;
+ default:
+ break;
+ }
+
+ if (rtlpriv->dm.undec_sm_pwdb >
+ (long)high_rssithresh_for_ra)
+ ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undec_sm_pwdb >
+ (long)low_rssithresh_for_ra)
+ ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (ra->pre_ratr_state != ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", ra->ratr_state);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ ra->pre_ratr_state, ra->ratr_state);
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ ra->ratr_state);
+ rcu_read_unlock();
+
+ ra->pre_ratr_state = ra->ratr_state;
+ }
+ }
+}
+
+static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+ return true;
+ }
+ if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+ return true;
+
+ return false;
+}
+
+static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt = 0;
+ u64 cur_rxok_cnt = 0;
+ u32 edca_be_ul = 0x6ea42b;
+ u32 edca_be_dl = 0x6ea42b;/*not sure*/
+ u32 edca_be = 0x5ea42b;
+ u32 iot_peer = 0;
+ bool is_cur_rdlstate;
+ bool last_is_cur_rdlstate = false;
+ bool bias_on_rx = false;
+ bool edca_turbo_on = false;
+
+ last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;
+
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+
+ iot_peer = rtlpriv->mac80211.vendor;
+ bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ?
+ true : false;
+ edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) &&
+ (!rtlpriv->dm.disable_framebursting)) ?
+ true : false;
+
+ if ((iot_peer == PEER_CISCO) &&
+ (mac->mode == WIRELESS_MODE_N_24G)) {
+ edca_be_dl = edca_setting_dl[iot_peer];
+ edca_be_ul = edca_setting_ul[iot_peer];
+ }
+ if (rtl8723be_dm_is_edca_turbo_disable(hw))
+ goto exit;
+
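+	/* Pick downlink vs. uplink EDCA parameters from the unicast TX/RX
+	 * byte ratio (4:1); Ralink/Atheros peers are biased toward the
+	 * downlink setting.
+	 */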
+ if (edca_turbo_on) {
+ if (bias_on_rx)
+ is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ?
+ false : true;
+ else
+ is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ?
+ true : false;
+
+ edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul;
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be);
+ rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate;
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
+ }
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+
+exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cur_cck_cca_thresh;
+
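+	/* Select the CCK CCA threshold from the minimum RSSI, falling back
+	 * to the CCK false-alarm count when the RSSI is weak or not linked.
+	 */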
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ if (rtlpriv->dm_digtable.rssi_val_min > 25) {
+ cur_cck_cca_thresh = 0xcd;
+ } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) &&
+ (rtlpriv->dm_digtable.rssi_val_min > 10)) {
+ cur_cck_cca_thresh = 0x83;
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+ } else {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+ cur_cck_cca_thresh = 0x83;
+ else
+ cur_cck_cca_thresh = 0x40;
+ }
+
+ if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh)
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+
+ rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres;
+ rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+		 "CCK CCA threshold = %x\n",
+ rtlpriv->dm_digtable.cur_cck_cca_thres);
+}
+
+static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 reg_c50, reg_c58;
+ bool fw_current_in_ps_mode = false;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_in_ps_mode));
+ if (fw_current_in_ps_mode)
+ return;
+
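+	/* Adjust the energy-detect CCA threshold according to the current
+	 * IGI readbacks of both OFDM paths.
+	 */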
+ reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+
+ if (reg_c50 > 0x28 && reg_c58 > 0x28) {
+ if (!rtlpriv->rtlhal.pre_edcca_enable) {
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03);
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00);
+ }
+ } else if (reg_c50 < 0x25 && reg_c58 < 0x25) {
+ if (rtlpriv->rtlhal.pre_edcca_enable) {
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f);
+ rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f);
+ }
+ }
+}
+
+static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ u8 crystal_cap;
+ u32 packet_count;
+ int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0;
+ int cfo_ave_diff;
+
+ if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+ if (rtldm->atc_status == ATC_STATUS_OFF) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+ if (rtlpriv->cfg->ops->get_btc_status()) {
+ if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable"
+ " CFO tracking for BT!!\n");
+ return;
+ }
+ }
+
+ if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
+ rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+ crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ }
+ } else {
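+		/* Linked: convert the CFO tails to kHz, average the active
+		 * paths, then either nudge the crystal cap or toggle ATC.
+		 */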
+ cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+ cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
+ packet_count = rtldm->packet_count;
+
+ if (packet_count == rtldm->packet_count_pre)
+ return;
+
+ rtldm->packet_count_pre = packet_count;
+
+ if (rtlpriv->phy.rf_type == RF_1T1R)
+ cfo_ave = cfo_khz_a;
+ else
+ cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1;
+
+ cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
+ (rtldm->cfo_ave_pre - cfo_ave) :
+ (cfo_ave - rtldm->cfo_ave_pre);
+
+ if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
+ rtldm->large_cfo_hit = 1;
+ return;
+ } else {
+ rtldm->large_cfo_hit = 0;
+ }
+
+ rtldm->cfo_ave_pre = cfo_ave;
+
+ if (cfo_ave >= -rtldm->cfo_threshold &&
+ cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) {
+ if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) {
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+ rtldm->is_freeze = 1;
+ } else {
+ rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+ }
+ }
+
+ if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+ adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 1) + 1;
+ else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
+ rtlpriv->dm.crystal_cap > 0)
+ adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 1) - 1;
+
+ if (adjust_xtal != 0) {
+ rtldm->is_freeze = 0;
+ rtldm->crystal_cap += adjust_xtal;
+
+ if (rtldm->crystal_cap > 0x3f)
+ rtldm->crystal_cap = 0x3f;
+ else if (rtldm->crystal_cap < 0)
+ rtldm->crystal_cap = 0;
+
+ crystal_cap = rtldm->crystal_cap & 0x3f;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystal_cap | (crystal_cap << 6)));
+ }
+
+ if (cfo_ave < CFO_THRESHOLD_ATC &&
+ cfo_ave > -CFO_THRESHOLD_ATC) {
+ if (rtldm->atc_status == ATC_STATUS_ON) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_OFF);
+ rtldm->atc_status = ATC_STATUS_OFF;
+ }
+ } else {
+ if (rtldm->atc_status == ATC_STATUS_OFF) {
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+ ATC_STATUS_ON);
+ rtldm->atc_status = ATC_STATUS_ON;
+ }
+ }
+ }
+}
+
+static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_sta_info *drv_priv;
+ u8 cnt = 0;
+
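+	/* one_entry_only: a linked STA interface, or an AP/IBSS/mesh
+	 * interface with exactly one associated peer.
+	 */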
+ rtlpriv->dm.one_entry_only = false;
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+ rtlpriv->dm.one_entry_only = true;
+ return;
+ }
+
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+ rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+ spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+ list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+ cnt++;
+ }
+ spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+ if (cnt == 1)
+ rtlpriv->dm.one_entry_only = true;
+ }
+}
+
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inpsmode));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *)(&fw_ps_awake));
+
+ if (ppsc->p2p_ps_info.p2p_ps_mode)
+ fw_ps_awake = false;
+
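+	/* Run the dynamic mechanisms only while the RF is on, the firmware
+	 * is awake and no RF change is in progress.
+	 */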
+ if ((ppsc->rfpwr_state == ERFON) &&
+ ((!fw_current_inpsmode) && fw_ps_awake) &&
+ (!ppsc->rfchange_inprogress)) {
+ rtl8723be_dm_common_info_self_update(hw);
+ rtl8723be_dm_false_alarm_counter_statistics(hw);
+ rtl8723be_dm_check_rssi_monitor(hw);
+ rtl8723be_dm_dig(hw);
+ rtl8723be_dm_dynamic_edcca(hw);
+ rtl8723be_dm_cck_packet_detection_thresh(hw);
+ rtl8723be_dm_refresh_rate_adaptive_mask(hw);
+ rtl8723be_dm_check_edca_turbo(hw);
+ rtl8723be_dm_dynamic_atc_switch(hw);
+ rtl8723be_dm_check_txpower_tracking(hw);
+ rtl8723be_dm_dynamic_txpower(hw);
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+ }
+ rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
new file mode 100644
index 000000000000..c6c2f2a78a66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
@@ -0,0 +1,310 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_DM_H__
+#define __RTL8723BE_DM_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+#define TXSCALE_TABLE_SIZE 30
+
+/*RF REG LIST*/
+#define DM_REG_RF_MODE_11N 0x00
+#define DM_REG_RF_0B_11N 0x0B
+#define DM_REG_CHNBW_11N 0x18
+#define DM_REG_T_METER_11N 0x24
+#define DM_REG_RF_25_11N 0x25
+#define DM_REG_RF_26_11N 0x26
+#define DM_REG_RF_27_11N 0x27
+#define DM_REG_RF_2B_11N 0x2B
+#define DM_REG_RF_2C_11N 0x2C
+#define DM_REG_RXRF_A3_11N 0x3C
+#define DM_REG_T_METER_92D_11N 0x42
+#define DM_REG_T_METER_88E_11N 0x42
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define DM_REG_BB_CTRL_11N 0x800
+#define DM_REG_RF_PIN_11N 0x804
+#define DM_REG_PSD_CTRL_11N 0x808
+#define DM_REG_TX_ANT_CTRL_11N 0x80C
+#define DM_REG_BB_PWR_SAV5_11N 0x818
+#define DM_REG_CCK_RPT_FORMAT_11N 0x824
+#define DM_REG_RX_DEFUALT_A_11N 0x858
+#define DM_REG_RX_DEFUALT_B_11N 0x85A
+#define DM_REG_BB_PWR_SAV3_11N 0x85C
+#define DM_REG_ANTSEL_CTRL_11N 0x860
+#define DM_REG_RX_ANT_CTRL_11N 0x864
+#define DM_REG_PIN_CTRL_11N 0x870
+#define DM_REG_BB_PWR_SAV1_11N 0x874
+#define DM_REG_ANTSEL_PATH_11N 0x878
+#define DM_REG_BB_3WIRE_11N 0x88C
+#define DM_REG_SC_CNT_11N 0x8C4
+#define DM_REG_PSD_DATA_11N 0x8B4
+/*PAGE 9*/
+#define DM_REG_ANT_MAPPING1_11N 0x914
+#define DM_REG_ANT_MAPPING2_11N 0x918
+/*PAGE A*/
+#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define DM_REG_CCK_CCA_11N 0xA0A
+#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define DM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define DM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define DM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define DM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define DM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define DM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define DM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define DM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define DM_REG_CCK_FA_RST_11N 0xA2C
+#define DM_REG_CCK_FA_MSB_11N 0xA58
+#define DM_REG_CCK_FA_LSB_11N 0xA5C
+#define DM_REG_CCK_CCA_CNT_11N 0xA60
+#define DM_REG_BB_PWR_SAV4_11N 0xA74
+/*PAGE B */
+#define DM_REG_LNA_SWITCH_11N 0xB2C
+#define DM_REG_PATH_SWITCH_11N 0xB30
+#define DM_REG_RSSI_CTRL_11N 0xB38
+#define DM_REG_CONFIG_ANTA_11N 0xB68
+#define DM_REG_RSSI_BT_11N 0xB9C
+/*PAGE C */
+#define DM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define DM_REG_RX_PATH_11N 0xC04
+#define DM_REG_TRMUX_11N 0xC08
+#define DM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define DM_REG_RXIQI_MATRIX_11N 0xC14
+#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define DM_REG_IGI_A_11N 0xC50
+#define DM_REG_ANTDIV_PARA2_11N 0xC54
+#define DM_REG_IGI_B_11N 0xC58
+#define DM_REG_ANTDIV_PARA3_11N 0xC5C
+#define DM_REG_BB_PWR_SAV2_11N 0xC70
+#define DM_REG_RX_OFF_11N 0xC7C
+#define DM_REG_TXIQK_MATRIXA_11N 0xC80
+#define DM_REG_TXIQK_MATRIXB_11N 0xC88
+#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define DM_REG_ANTDIV_PARA1_11N 0xCA4
+#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/*PAGE D */
+#define DM_REG_OFDM_FA_RSTD_11N 0xD00
+#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/*PAGE E */
+#define DM_REG_TXAGC_A_6_18_11N 0xE00
+#define DM_REG_TXAGC_A_24_54_11N 0xE04
+#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define DM_REG_FPGA0_IQK_11N 0xE28
+#define DM_REG_TXIQK_TONE_A_11N 0xE30
+#define DM_REG_RXIQK_TONE_A_11N 0xE34
+#define DM_REG_TXIQK_PI_A_11N 0xE38
+#define DM_REG_RXIQK_PI_A_11N 0xE3C
+#define DM_REG_TXIQK_11N 0xE40
+#define DM_REG_RXIQK_11N 0xE44
+#define DM_REG_IQK_AGC_PTS_11N 0xE48
+#define DM_REG_IQK_AGC_RSP_11N 0xE4C
+#define DM_REG_BLUETOOTH_11N 0xE6C
+#define DM_REG_RX_WAIT_CCA_11N 0xE70
+#define DM_REG_TX_CCK_RFON_11N 0xE74
+#define DM_REG_TX_CCK_BBON_11N 0xE78
+#define DM_REG_OFDM_RFON_11N 0xE7C
+#define DM_REG_OFDM_BBON_11N 0xE80
+#define DM_REG_TX2RX_11N 0xE84
+#define DM_REG_TX2TX_11N 0xE88
+#define DM_REG_RX_CCK_11N 0xE8C
+#define DM_REG_RX_OFDM_11N 0xED0
+#define DM_REG_RX_WAIT_RIFS_11N 0xED4
+#define DM_REG_RX2RX_11N 0xED8
+#define DM_REG_STANDBY_11N 0xEDC
+#define DM_REG_SLEEP_11N 0xEE0
+#define DM_REG_PMPD_ANAEN_11N 0xEEC
+
+/*MAC REG LIST*/
+#define DM_REG_BB_RST_11N 0x02
+#define DM_REG_ANTSEL_PIN_11N 0x4C
+#define DM_REG_EARLY_MODE_11N 0x4D0
+#define DM_REG_RSSI_MONITOR_11N 0x4FE
+#define DM_REG_EDCA_VO_11N 0x500
+#define DM_REG_EDCA_VI_11N 0x504
+#define DM_REG_EDCA_BE_11N 0x508
+#define DM_REG_EDCA_BK_11N 0x50C
+#define DM_REG_TXPAUSE_11N 0x522
+#define DM_REG_RESP_TX_11N 0x6D8
+#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+/*DIG Related*/
+#define DM_BIT_IGI_11N 0x0000007F
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 43
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_FA_UPPER 0x3e
+#define DM_DIG_FA_LOWER 0x1e
+#define DM_DIG_FA_TH0 0x200
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TXPWRTRACK_MAX_IDX 6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF 0x0 /* enable */
+#define ATC_STATUS_ON 0x1 /* disable */
+#define CFO_THRESHOLD_XTAL 10 /* kHz */
+#define CFO_THRESHOLD_ATC 80 /* kHz */
+
+enum FAT_STATE {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+ BBSWING,
+ TXAGC
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
+
+void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc,
+ u32 mac_id);
+void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux,
+ u32 mac_id, u32 rx_pwdb_all);
+void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8723be_dm_init(struct ieee80211_hw *hw);
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+ u8 *pdirection, u32 *poutwrite_val);
+void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
new file mode 100644
index 000000000000..f856be6fc138
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -0,0 +1,620 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+
+static bool _rtl8723be_check_fw_read_last_h2c(struct ieee80211_hw *hw,
+ u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
+static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boxnum;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+ bool bwrite_sucess = false;
+ u8 wait_h2c_limit = 100;
+ u8 wait_writeh2c_limit = 100;
+ u8 boxcontent[4], boxextcontent[4];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag;
+ u8 idx;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+				 "H2C set in progress! Waiting to set "
+ "element_id(%d).\n", element_id);
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
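+	/* Rotate through the four H2C mailboxes, waiting for the firmware
+	 * to consume the previous box before writing the next command.
+	 */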
+ while (!bwrite_sucess) {
+ wait_writeh2c_limit--;
+ if (wait_writeh2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "Write H2C failed because there is no trigger "
+ "for FW INT!\n");
+ break;
+ }
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
+ break;
+ }
+ isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ wait_h2c_limit--;
+ if (wait_h2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+					 "Waiting too long for FW to read "
+ "clear HMEBox(%d)!\n", boxnum);
+ break;
+ }
+ udelay(10);
+
+ isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
+ boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+				 "Waiting for FW to read clear HMEBox(%d)! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
+ }
+ if (!isfw_read) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! "
+ "Fw do not read.\n", boxnum);
+ break;
+ }
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
+
+ switch (cmd_len) {
+ case 1:
+ case 2:
+ case 3:
+ /*boxcontent[0] &= ~(BIT(7));*/
+ memcpy((u8 *)(boxcontent) + 1,
+ p_cmdbuffer + buf_index, cmd_len);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /*boxcontent[0] |= (BIT(7));*/
+ memcpy((u8 *)(boxextcontent),
+ p_cmdbuffer + buf_index+3, cmd_len-3);
+ memcpy((u8 *)(boxcontent) + 1,
+ p_cmdbuffer + buf_index, 3);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ bwrite_success = true;
+
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (!rtlhal->fw_ready) {
+ RT_ASSERT(false,
+ "return H2C cmd because of Fw download fail!!!\n");
+ return;
+ }
+ memset(tmp_cmdbuf, 0, 8);
+ memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+ _rtl8723be_fill_h2c_command(hw, element_id, cmd_len,
+ (u8 *)&tmp_cmdbuf);
+ return;
+}
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 rlbm, power_state = 0;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+ rlbm = 0; /* YJ, temp, 120316. FW does not yet support RLBM = 2. */
+ SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+ (rtlpriv->mac80211.p2p) ?
+ ppsc->smart_ps : 1);
+ SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+ SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+ if (mode == FW_PS_ACTIVE_MODE)
+ power_state |= FW_PWR_STATE_ACTIVE;
+ else
+ power_state |= FW_PWR_STATE_RF_OFF;
+ SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE,
+ H2C_8723BE_PWEMODE_LENGTH,
+ u1_h2c_set_pwrmode);
+}
+
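+/* Push a locally generated frame (the reserved page image) to the
+ * hardware through the beacon queue: drop any stale beacon still
+ * queued, attach the new skb to descriptor 0 as a command frame and
+ * kick TX polling for the beacon queue.
+ */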
+static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct sk_buff *pskb = NULL;
+ u8 own;
+ unsigned long flags;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
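+
+/* Layout of the reserved page image downloaded to the firmware. Each
+ * page is 128 bytes: the beacon template uses pages 0-1, PS-Poll page
+ * 2, NULL data page 3 and the probe response pages 4-5, i.e.
+ * 6 * 128 = 768 bytes in total.
+ */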
+#define BEACON_PG 0 /* ->1 */
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /* ->5 */
+
+#define TOTAL_RESERVED_PKT_LEN 768
+
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+ /* page 0 beacon */
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65,
+ 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B,
+ 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06,
+ 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32,
+ 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C,
+ 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C,
+ 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50,
+ 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04,
+ 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00,
+
+ /* page 1 beacon */
+ 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 2 ps-poll */
+ 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B,
+ 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 3 null */
+ 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B,
+ 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+ 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 4 probe_resp */
+ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 5 probe_resp */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
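+/* Download the reserved pages to the firmware: patch the current MAC
+ * address, BSSID and AID into the templates above, send the whole
+ * image through the beacon queue and finally report the page index of
+ * each frame with the H2C_8723BE_RSVDPAGE command.
+ */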
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ bool dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1rsvdpageloc[5] = { 0 };
+ bool dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ * (1) beacon
+ *---------------------------------------------------------
+ */
+ beacon = &reserved_page_packet[BEACON_PG * 128];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ * (2) ps-poll
+ *-------------------------------------------------------
+ */
+ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
+
+ /*--------------------------------------------------------
+ * (3) null data
+ *--------------------------------------------------------
+ */
+ nullfunc = &reserved_page_packet[NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
+
+ /*---------------------------------------------------------
+ * (4) probe response
+ *---------------------------------------------------------
+ */
+ p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl8723be_set_fw_rsvdpagepkt(): "
+ "HW_VAR_SET_TX_CMD: ALL\n",
+ &reserved_page_packet[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl8723be_set_fw_rsvdpagepkt(): "
+ "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3);
+
+
+ skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
+ memcpy((u8 *)skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+
+ rtstatus = _rtl8723be_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ dlok = true;
+
+ if (dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
+ u1rsvdpageloc, 3);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE,
+ sizeof(u1rsvdpageloc), u1rsvdpageloc);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ }
+}
+
+/*Should check FW support p2p or not.*/
+static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
+ u8 ctwindow)
+{
+ u8 u1_ctwindow_period[1] = {ctwindow};
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1,
+ u1_ctwindow_period);
+}
+
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+ u8 p2p_ps_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+ struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+ u8 i;
+ u16 ctwindow;
+ u32 start_time, tsf_low;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
+ break;
+ case P2P_PS_ENABLE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ /* update CTWindow value. */
+ if (p2pinfo->ctwindow > 0) {
+ p2p_ps_offload->ctwindow_en = 1;
+ ctwindow = p2pinfo->ctwindow;
+ rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow);
+ }
+ /* hw only supports 2 sets of NoA */
+ for (i = 0; i < p2pinfo->noa_num; i++) {
+ /* To control the register setting
+ * for which NOA
+ */
+ rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->noa0_en = 1;
+ else
+ p2p_ps_offload->noa1_en = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtl_write_dword(rtlpriv, 0x5E0,
+ p2pinfo->noa_duration[i]);
+ rtl_write_dword(rtlpriv, 0x5E4,
+ p2pinfo->noa_interval[i]);
+
+ /*Get Current TSF value */
+ tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ start_time = p2pinfo->noa_start_time[i];
+ if (p2pinfo->noa_count_type[i] != 1) {
+ while (start_time <= (tsf_low + (50 * 1024))) {
+ start_time += p2pinfo->noa_interval[i];
+ if (p2pinfo->noa_count_type[i] != 255)
+ p2pinfo->noa_count_type[i]--;
+ }
+ }
+ rtl_write_dword(rtlpriv, 0x5E8, start_time);
+ rtl_write_dword(rtlpriv, 0x5EC,
+ p2pinfo->noa_count_type[i]);
+ }
+ if ((p2pinfo->opp_ps == 1) ||
+ (p2pinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->offload_en = 1;
+
+ if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->allstasleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ p2p_ps_offload->discovery = 0;
+ p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1,
+ (u8 *)p2p_ps_offload);
+}
+
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = { 0 };
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1,
+ u1_joinbssrpt_parm);
+}
+
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+ u8 ap_offload_enable)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 };
+
+ SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+ SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid);
+ SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD,
+ H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
new file mode 100644
index 000000000000..31eec281e446
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE__FW__H__
+#define __RTL8723BE__FW__H__
+
+#define FW_8192C_SIZE 0x8000
+#define FW_8192C_START_ADDRESS 0x1000
+#define FW_8192C_END_ADDRESS 0x5FFF
+#define FW_8192C_PAGE_SIZE 4096
+#define FW_8192C_POLLING_DELAY 5
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x5300)
+#define USE_OLD_WOWLAN_DEBUG_FW 0
+
+#define H2C_8723BE_RSVDPAGE_LOC_LEN 5
+#define H2C_8723BE_PWEMODE_LENGTH 5
+#define H2C_8723BE_JOINBSSRPT_LENGTH 1
+#define H2C_8723BE_AP_OFFLOAD_LENGTH 3
+#define H2C_8723BE_WOWLAN_LENGTH 3
+#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH 3
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 1
+#else
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 3
+#endif
+#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN 2
+#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN 7
+
+
+/* FW PS state for RPWM.
+ * BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state
+ * BIT[4] = sub-state
+ */
+#define FW_PS_GO_ON BIT(0)
+#define FW_PS_TX_NULL BIT(1)
+#define FW_PS_RF_ON BIT(2)
+#define FW_PS_REGISTER_ACTIVE BIT(3)
+
+#define FW_PS_DPS BIT(0)
+#define FW_PS_LCLK (FW_PS_DPS)
+#define FW_PS_RF_OFF BIT(1)
+#define FW_PS_ALL_ON BIT(2)
+#define FW_PS_ST_ACTIVE BIT(3)
+#define FW_PS_ISR_ENABLE BIT(4)
+#define FW_PS_IMR_ENABLE BIT(5)
+
+
+#define FW_PS_ACK BIT(6)
+#define FW_PS_TOGGLE BIT(7)
+
+/* 88E RPWM value */
+/* BIT[0] = 1: 32k, 0: 40M */
+#define FW_PS_CLOCK_OFF BIT(0) /* 32k */
+#define FW_PS_CLOCK_ON 0 /* 40M */
+
+#define FW_PS_STATE_MASK (0x0F)
+#define FW_PS_STATE_HW_MASK (0x07)
+/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+#define FW_PS_STATE_INT_MASK (0x3F)
+
+#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x))
+#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x))
+#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x))
+#define FW_PS_ISR_VAL(x) ((x) & 0x70)
+#define FW_PS_IMR_MASK(x) ((x) & 0xDF)
+#define FW_PS_KEEP_IMR(x) ((x) & 0x20)
+
+
+#define FW_PS_STATE_S0 (FW_PS_DPS)
+#define FW_PS_STATE_S1 (FW_PS_LCLK)
+#define FW_PS_STATE_S2 (FW_PS_RF_OFF)
+#define FW_PS_STATE_S3 (FW_PS_ALL_ON)
+#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON)
+/* (FW_PS_RF_ON)*/
+#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON)
+/* 0x0*/
+#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON)
+/* (FW_PS_STATE_RF_OFF)*/
+#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF)
+
+#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4)
+#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3)
+#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2)
+#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1)
+
+
+/* For 88E H2C PwrMode Cmd ID 5.*/
+#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define FW_PWR_STATE_RF_OFF 0
+
+#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
+#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON))
+#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON))
+#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE))
+#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40)
+
+#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
+
+#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \
+ (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF)
+
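+/* Number of 128-byte TX buffer pages needed to hold _len bytes. */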
+#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
+
+#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \
+ LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+
+/* Keep Alive Control*/
+#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#else
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+ u8 ap_offload_enable);
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+ bool dl_finished);
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl8723be_download_fw(struct ieee80211_hw *hw,
+ bool buse_wake_on_wlan_fw);
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+ u8 p2p_ps_state);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
new file mode 100644
index 000000000000..0fdf0909321f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -0,0 +1,2523 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseq.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG 5
+
+static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->get_desc(
+ (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+}
+
+static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
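+/* Turn the firmware-controlled clock back on while in the 32k
+ * low-power state: serialize against other clock changes, write the
+ * RPWM value and, when an ACK is requested, poll for the CPWM
+ * interrupt before treating the RF path as awake again; optionally
+ * re-arm the clock-off timer afterwards.
+ */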
+static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
+ bool need_turn_off_ckk)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool support_remote_wake_up;
+ u32 count = 0, isr_regaddr, content;
+ bool schedule_timer = need_turn_off_ckk;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+ (u8 *)(&support_remote_wake_up));
+
+ if (!rtlhal->fw_ready)
+ return;
+ if (!rtlpriv->psc.fw_current_inpsmode)
+ return;
+
+ while (1) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (rtlhal->fw_clk_change_in_progress) {
+ while (rtlhal->fw_clk_change_in_progress) {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ count++;
+ udelay(100);
+ if (count > 1000)
+ return;
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ break;
+ }
+ }
+ if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
+ if (FW_PS_IS_ACK(rpwm_val)) {
+ isr_regaddr = REG_HISR;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ while (!(content & IMR_CPWM) && (count < 500)) {
+ udelay(50);
+ count++;
+ content = rtl_read_dword(rtlpriv, isr_regaddr);
+ }
+
+ if (content & IMR_CPWM) {
+ rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
+ rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set "
+ "pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
+ }
+ }
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ } else {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ }
+}
+
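+/* Allow the firmware to gate the clock again: the low-power RPWM
+ * value is only written once every TX ring is empty; otherwise the
+ * fw_clockoff_timer is re-armed to retry 10 ms later.
+ */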
+static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ enum rf_pwrstate rtstate;
+ bool schedule_timer = false;
+ u8 queue;
+
+ if (!rtlhal->fw_ready)
+ return;
+ if (!rtlpriv->psc.fw_current_inpsmode)
+ return;
+ if (!rtlhal->allow_sw_to_change_hwclc)
+ return;
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
+ if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
+ return;
+
+ for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+ ring = &rtlpci->tx_ring[queue];
+ if (skb_queue_len(&ring->queue)) {
+ schedule_timer = true;
+ break;
+ }
+ }
+ if (schedule_timer) {
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ return;
+ }
+ if (FW_PS_STATE(rtlhal->fw_ps_state) !=
+ FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ if (!rtlhal->fw_clk_change_in_progress) {
+ rtlhal->fw_clk_change_in_progress = true;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+ rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ &rpwm_val);
+ spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+ rtlhal->fw_clk_change_in_progress = false;
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ } else {
+ spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+ mod_timer(&rtlpriv->works.fw_clockoff_timer,
+ jiffies + MSECS(10));
+ }
+ }
+}
+
+static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+ u8 rpwm_val = 0;
+ rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
+ _rtl8723be_set_fw_clock_on(hw, rpwm_val, true);
+}
+
+static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool fw_current_inps = false;
+ u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+ if (ppsc->low_power_enable) {
+ rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */
+ _rtl8723be_set_fw_clock_on(hw, rpwm_val, false);
+ rtlhal->allow_sw_to_change_hwclc = false;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ } else {
+ rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &fw_pwrmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ }
+}
+
+static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool fw_current_inps = true;
+ u8 rpwm_val;
+
+ if (ppsc->low_power_enable) {
+ rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &ppsc->fwctrl_psmode);
+ rtlhal->allow_sw_to_change_hwclc = true;
+ _rtl8723be_set_fw_clock_off(hw, rpwm_val);
+
+ } else {
+ rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *)(&fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+ &ppsc->fwctrl_psmode);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+ }
+}
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)(val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON: {
+ enum rf_pwrstate rfstate;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
+ *((bool *)(val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *)(val)) = false;
+ else
+ *((bool *)(val)) = true;
+ }
+ break; }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *)(val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF: {
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ *((u64 *)(val)) = tsf;
+
+ break; }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process %x\n", variable);
+ break;
+ }
+}
+
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]);
+ break;
+ case HW_VAR_BASIC_RATE: {
+ u16 rate_cfg = ((u16 *)val)[0];
+ u8 rate_index = 0;
+ rate_cfg = rate_cfg & 0x15f;
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index);
+ break; }
+ case HW_VAR_BSSID:
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]);
+ break;
+ case HW_VAR_SIFS:
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME: {
+ u8 e_aci;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &e_aci);
+ }
+ break; }
+ case HW_VAR_ACK_PREAMBLE: {
+ u8 reg_tmp;
+ u8 short_preamble = (bool)*val;
+ reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2);
+ if (short_preamble) {
+ reg_tmp |= 0x02;
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ } else {
+ reg_tmp &= 0xFD;
+ rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+ }
+ break; }
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE: {
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *val;
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+
+ mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+ min_spacing_to_set);
+
+ *val = min_spacing_to_set;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break; }
+ case HW_VAR_SHORTGI_DENSITY: {
+ u8 density_to_set;
+
+ density_to_set = *val;
+ mac->min_space_cfg |= (density_to_set << 3);
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break; }
+ case HW_VAR_AMPDU_FACTOR: {
+ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 factor_toset;
+ u8 *p_regtoset = NULL;
+ u8 index = 0;
+
+ p_regtoset = regtoset_normal;
+
+ factor_toset = *val;
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((p_regtoset[index] & 0xf0) >
+ (factor_toset << 4))
+ p_regtoset[index] =
+ (p_regtoset[index] & 0x0f) |
+ (factor_toset << 4);
+
+ if ((p_regtoset[index] & 0x0f) > factor_toset)
+ p_regtoset[index] =
+ (p_regtoset[index] & 0xf0) |
+ (factor_toset);
+
+ rtl_write_byte(rtlpriv,
+ (REG_AGGLEN_LMT + index),
+ p_regtoset[index]);
+ }
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
+ }
+ break; }
+ case HW_VAR_AC_PARAM: {
+ u8 e_aci = *val;
+ rtl8723_dm_init_edca_turbo(hw);
+
+ if (rtlpci->acm_method != EACMWAY2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+ &e_aci);
+ break; }
+ case HW_VAR_ACM_CTRL: {
+ u8 e_aci = *val;
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= ACMHW_BEQEN;
+ break;
+ case AC2_VI:
+ acm_ctrl |= ACMHW_VIQEN;
+ break;
+ case AC3_VO:
+ acm_ctrl |= ACMHW_VOQEN;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm);
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~ACMHW_VIQEN);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~ACMHW_VOQEN);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl);
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break; }
+ case HW_VAR_RCR:
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
+ rtlpci->receive_config = ((u32 *)(val))[0];
+ break;
+ case HW_VAR_RETRY_LIMIT: {
+ u8 retry_limit = *val;
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break; }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *)val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *val;
+ break;
+ case HW_VAR_IO_CMD:
+ rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_SET_RPWM: {
+ u8 rpwm_val;
+
+ rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+ udelay(1);
+
+ if (rpwm_val & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
+ } else {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
+ }
+ break; }
+ case HW_VAR_H2C_FW_PWRMODE:
+ rtl8723be_set_fw_pwrmode_cmd(hw, *val);
+ break;
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *)val);
+ break;
+ case HW_VAR_RESUME_CLK_ON:
+ _rtl8723be_set_fw_ps_rf_on(hw);
+ break;
+ case HW_VAR_FW_LPS_ACTION: {
+ bool enter_fwlps = *((bool *)val);
+
+ if (enter_fwlps)
+ _rtl8723be_fwlps_enter(hw);
+ else
+ _rtl8723be_fwlps_leave(hw);
+
+ break; }
+ case HW_VAR_H2C_FW_JOINBSSRPT: {
+ u8 mstatus = *val;
+ u8 tmp_regcr, tmp_reg422, bcnvalid_reg;
+ u8 count = 0, dlbcn_count = 0;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
+
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+
+ do {
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 2,
+ (bcnvalid_reg | BIT(0)));
+ _rtl8723be_return_beacon_queue_skb(hw);
+
+ rtl8723be_set_fw_rsvdpagepkt(hw, 0);
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ count = 0;
+ while (!(bcnvalid_reg & BIT(0)) && count < 20) {
+ count++;
+ udelay(10);
+ bcnvalid_reg = rtl_read_byte(rtlpriv,
+ REG_TDECTRL + 2);
+ }
+ dlbcn_count++;
+ } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
+
+ if (bcnvalid_reg & BIT(0))
+ rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+ if (recover) {
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ }
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl8723be_set_fw_joinbss_report_cmd(hw, *val);
+ break; }
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+ rtl8723be_set_p2p_ps_offload_cmd(hw, *val);
+ break;
+ case HW_VAR_AID: {
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+ (u2btmp | mac->assoc_id));
+ break; }
+ case HW_VAR_CORRECT_TSF: {
+ u8 btype_ibss = *val;
+
+ if (btype_ibss)
+ _rtl8723be_stop_tx_beacon(hw);
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32) & 0xffffffff));
+
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+ if (btype_ibss)
+ _rtl8723be_resume_tx_beacon(hw);
+ break; }
+ case HW_VAR_KEEP_ALIVE: {
+ u8 array[2];
+ array[0] = 0xff;
+ array[1] = *val;
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL,
+ 2, array);
+ break; }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process %x\n",
+ variable);
+ break;
+ }
+}
+
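+/* Write a single link list table (LLT) entry and poll until the
+ * hardware reports the access as finished.
+ */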
+static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ int count = 0;
+ u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+ _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to polling write LLT done at "
+ "address %d!\n", address);
+ status = false;
+ break;
+ }
+ } while (++count);
+
+ return status;
+}
+
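+/* Initialize the TX packet buffer link list: pages 0 to
+ * txpktbuf_bndy - 1 are chained for normal TX buffering (terminated
+ * with 0xFF), while the pages from txpktbuf_bndy up to maxpage are
+ * linked into a ring.
+ */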
+static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxpage;
+ bool status;
+
+ maxpage = 255;
+ txpktbuf_bndy = 245;
+
+ rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
+ (0x27FF0000 | txpktbuf_bndy));
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl8723be_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+ status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+
+ if (!status)
+ return status;
+
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
+ status = _rtl8723be_llt_write(hw, i, (i + 1));
+ if (!status)
+ return status;
+ }
+ status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy);
+ if (!status)
+ return status;
+
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80e40808);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+ return true;
+}
+
+static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+
+ if (rtlpriv->rtlhal.up_first_time)
+ return;
+
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtl8723be_sw_led_on(hw, pled0);
+ else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+ rtl8723be_sw_led_on(hw, pled0);
+ else
+ rtl8723be_sw_led_off(hw, pled0);
+}
+
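+/* Bring the MAC block up: run the power-on command sequence, set up
+ * the LLT when the MAC was not already enabled, program the DMA base
+ * addresses of all TX/RX rings and apply the default RCR/TCR and
+ * interrupt related settings.
+ */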
+static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ unsigned char bytetmp;
+ unsigned short wordtmp;
+ u16 retry = 0;
+ bool mac_func_enable;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /*Auto Power Down to CHIP-off State*/
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ if (bytetmp == 0xFF)
+ mac_func_enable = true;
+ else
+ mac_func_enable = false;
+
+ /* HW Power on sequence */
+ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+ PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+ RTL8723_NIC_ENABLE_FLOW)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as power on failure\n");
+ return false;
+ }
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+ bytetmp = 0xff;
+ rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+ mdelay(2);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_HWSEQ_CTRL);
+ bytetmp |= 0x7f;
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+ mdelay(2);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+ if (bytetmp & BIT(0)) {
+ bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+ bytetmp |= BIT(6);
+ rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+ }
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);
+ bytetmp |= BIT(3);
+ rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp);
+ bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+ bytetmp &= ~BIT(4);
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3);
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77);
+
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ if (!mac_func_enable) {
+ if (!_rtl8723be_llt_table_init(hw))
+ return false;
+ }
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+ /* Enable FW Beamformer Interrupt */
+ bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+ rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF5B1;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+ rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+ rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+ (u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+ (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+ (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+ (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+ (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_HQ_DESA,
+ (u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
+ DMA_BIT_MASK(32));
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 3);
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0x77);
+
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+
+ rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+
+ do {
+ retry++;
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ } while ((retry < 200) && (bytetmp & BIT(7)));
+
+ _rtl8723be_gen_refresh_led_state(hw);
+
+ rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2));
+
+ return true;
+}
+
+static void _rtl8723be_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 reg_bw_opmode;
+ u32 reg_ratr, reg_prsr;
+
+ reg_bw_opmode = BW_OPMODE_20MHZ;
+ reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+}
+
+static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ rtl_write_byte(rtlpriv, 0x34b, 0x93);
+ rtl_write_word(rtlpriv, 0x350, 0x870c);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+
+ if (ppsc->support_backdoor)
+ rtl_write_byte(rtlpriv, 0x349, 0x1b);
+ else
+ rtl_write_byte(rtlpriv, 0x349, 0x03);
+
+ rtl_write_word(rtlpriv, 0x350, 0x2718);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
+ return;
+ }
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
+
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
+ }
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n",
+ sec_reg_value);
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
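+/* Full hardware bring-up: initialize the MAC, download the firmware,
+ * configure MAC/BB/RF, reset the CAM, enable hardware encryption and
+ * BT coexistence, and finish with dynamic-mechanism and calibration
+ * setup.
+ */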
+int rtl8723be_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ bool rtstatus = true;
+ int err;
+ u8 tmp_u1b;
+ unsigned long flags;
+
+ /* reenable interrupts to not interfere with other devices */
+ local_save_flags(flags);
+ local_irq_enable();
+
+ rtlpriv->rtlhal.being_init_adapter = true;
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl8723be_init_mac(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+ err = 1;
+ goto exit;
+ }
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+ tmp_u1b &= 0x7F;
+ rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+ err = rtl8723_download_fw(hw, true);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
+ err = 1;
+ rtlhal->fw_ready = false;
+ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+ rtlhal->last_hmeboxnum = 0;
+ rtl8723be_phy_mac_config(hw);
+ /* The previous call modified RCR, so refresh the cached
+ * receive_config here; otherwise throughput becomes unstable
+ * because receive_config is wrong: RCR_ACRC32 makes RX
+ * throughput unstable and RCR_APP_ICV makes mac80211
+ * disassociate from Cisco 1252 APs.
+ */
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ rtl8723be_phy_bb_config(hw);
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ rtl8723be_phy_rf_config(hw);
+
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[0] &= 0xFFF03FF;
+ rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11));
+
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ _rtl8723be_hw_configure(hw);
+ rtl_cam_reset_all_entry(hw);
+ rtl8723be_enable_hw_security_config(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ _rtl8723be_enable_aspm_back_door(hw);
+ rtlpriv->intf_ops->enable_aspm(hw);
+
+ rtl8723be_bt_hw_init(hw);
+
+ rtl_set_bbreg(hw, 0x64, BIT(20), 0);
+ rtl_set_bbreg(hw, 0x64, BIT(24), 0);
+
+ rtl_set_bbreg(hw, 0x40, BIT(4), 0);
+ rtl_set_bbreg(hw, 0x40, BIT(3), 1);
+
+ rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3);
+ rtl_set_bbreg(hw, 0x930, 0xff, 0x77);
+
+ rtl_set_bbreg(hw, 0x38, BIT(11), 0x1);
+
+ rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000);
+
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl8723be_dm_check_txpower_tracking(hw);
+ rtl8723be_phy_lc_calibrate(hw);
+ }
+ tmp_u1b = efuse_read_1byte(hw, 0x1FA);
+ if (!(tmp_u1b & BIT(0))) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ }
+ if (!(tmp_u1b & BIT(4))) {
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
+ tmp_u1b &= 0x0F;
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
+ udelay(10);
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ }
+ rtl8723be_dm_init(hw);
+exit:
+ local_irq_restore(flags);
+ rtlpriv->rtlhal.being_init_adapter = false;
+ return err;
+}
+
+static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum version_8723e version = VERSION_UNKNOWN;
+ u8 count = 0;
+ u8 value8;
+ u32 value32;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0);
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0));
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0));
+
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+	while ((value8 & BIT(0)) && (count++ < 100)) {
+ udelay(10);
+ value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ }
+ count = 0;
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ while ((value8 == 0) && (count++ < 50)) {
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ mdelay(1);
+ }
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+ if ((value32 & (CHIP_8723B)) != CHIP_8723B)
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
+ else
+ version = (enum version_8723e) VERSION_TEST_CHIP_1T1R_8723B;
+
+ rtlphy->rf_type = RF_1T1R;
+
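+	/* A ROM version of 2 or higher appears to indicate a later chip
+	 * cut; record it in bit 3 of the version value.
+	 */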
+ value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+ if (value8 >= 0x02)
+ version |= BIT(3);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
+
+ return version;
+}
+
+static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+ rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl8723be_stop_tx_beacon(hw);
+ _rtl8723be_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+ _rtl8723be_resume_tx_beacon(hw);
+ _rtl8723be_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: "
+			 "No such media status (%x).\n", type);
+ }
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "Network type %d not supported!\n", type);
+ return 1;
+ }
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0x03) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+}
+
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rcr = rtlpci->receive_config;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ if (check_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+	} else {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ }
+}
+
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl8723be_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl8723be_set_check_bssid(hw, true);
+ } else {
+ rtl8723be_set_check_bssid(hw, false);
+ }
+ return 0;
+}
+
+/* Don't set REG_EDCA_BE_PARAM here because mac80211
+ * still sends packets while scanning.
+ */
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl8723_dm_init_edca_turbo(hw);
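+	/* The EDCA parameter dwords written below appear to pack the AIFS,
+	 * contention window limits and TXOP limit for each access category;
+	 * BE is left to the EDCA-turbo logic invoked above.
+	 */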
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+ break;
+ case AC0_BE:
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+ break;
+ default:
+ RT_ASSERT(false, "invalid aci: %d !\n", aci);
+ break;
+ }
+}
+
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+	/* Some C2H commands (e.g. C2H, CPWM) may already have been sent
+	 * before the system interrupt was enabled, so clear all C2H
+	 * events the FW has signalled; otherwise the FW will not
+	 * schedule any further commands.
+	 */
+ rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+ /*enable system interrupt*/
+ rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+ rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
+}
+
+static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1b_tmp;
+
+ /* Combo (PCIe + USB) Card and PCIe-MF Card */
+ /* 1. Run LPS WL RFOFF flow */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW);
+
+ /* 2. 0x1F[7:0] = 0 */
+ /* turn off RF */
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+ rtlhal->fw_ready)
+ rtl8723be_firmware_selfreset(hw);
+
+ /* Reset MCU. Suggested by Filen. */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+
+ /* g. MCUFWDL 0x80[1:0]= 0 */
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ /* HW card disable configuration. */
+ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW);
+
+ /* Reset MCU IO Wrapper */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+ /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8723be_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl8723be_set_media_status(hw, opmode);
+ if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ _rtl8723be_poweroff_adapter(hw);
+
+ /* after power off we should do iqk again */
+ rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
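+	/* Read the pending interrupt status bits we care about and write
+	 * the same bits back to acknowledge them.
+	 */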
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+ rtlpci->irq_mask[1];
+ rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+}
+
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl8723be_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+ rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
+ rtl8723be_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl8723be_disable_interrupt(hw);
+ rtl8723be_enable_interrupt(hw);
+}
+
+static u8 _rtl8723be_get_chnl_group(u8 chnl)
+{
+ u8 group;
+
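+	/* Callers pass 1-based 2.4 GHz channel numbers: channels 1-2 map
+	 * to group 0, 3-8 to group 1 and 9-14 to group 2.
+	 */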
+ if (chnl < 3)
+ group = 0;
+ else if (chnl < 9)
+ group = 1;
+ else
+ group = 2;
+ return group;
+}
+
+static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
+ struct txpower_info_2g *pw2g,
+ struct txpower_info_5g *pw5g,
+ bool autoload_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8723BE(): "
+ "PROMContent[0x%x]= 0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
+ if (0xFF == hwinfo[addr + 1]) /*YJ, add, 120316*/
+ autoload_fail = true;
+
+ if (autoload_fail) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+			 "autoload failed: use default values!\n");
+ for (path = 0; path < MAX_RF_PATH; path++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pw2g->index_cck_base[path][group] = 0x2D;
+ pw2g->index_bw40_base[path][group] = 0x2D;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw2g->bw20_diff[path][0] = 0x02;
+ pw2g->ofdm_diff[path][0] = 0x04;
+ } else {
+ pw2g->bw20_diff[path][cnt] = 0xFE;
+ pw2g->bw40_diff[path][cnt] = 0xFE;
+ pw2g->cck_diff[path][cnt] = 0xFE;
+ pw2g->ofdm_diff[path][cnt] = 0xFE;
+ }
+ }
+ }
+ return;
+ }
+ for (path = 0; path < MAX_RF_PATH; path++) {
+		/* 2.4G base power indexes (0xFF means unprogrammed, use default) */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pw2g->index_cck_base[path][group] = hwinfo[addr++];
+ if (pw2g->index_cck_base[path][group] == 0xFF)
+ pw2g->index_cck_base[path][group] = 0x2D;
+ }
+ for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
+ pw2g->index_bw40_base[path][group] = hwinfo[addr++];
+ if (pw2g->index_bw40_base[path][group] == 0xFF)
+ pw2g->index_bw40_base[path][group] = 0x2D;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw2g->bw40_diff[path][cnt] = 0;
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw20_diff[path][cnt] = 0x02;
+ } else {
+ pw2g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+					/* sign-extend 4-bit value to 8 bits */
+ if (pw2g->bw20_diff[path][cnt] & BIT(3))
+ pw2g->bw20_diff[path][cnt] |= 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->ofdm_diff[path][cnt] = 0x04;
+ } else {
+ pw2g->ofdm_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+					/* sign-extend 4-bit value to 8 bits */
+ if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+ pw2g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ pw2g->cck_diff[path][cnt] = 0;
+ addr++;
+ } else {
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw40_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->bw40_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw2g->bw40_diff[path][cnt] & BIT(3))
+ pw2g->bw40_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->bw20_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw2g->bw20_diff[path][cnt] & BIT(3))
+ pw2g->bw20_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->ofdm_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->ofdm_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+ pw2g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw2g->cck_diff[path][cnt] = 0xFE;
+ } else {
+ pw2g->cck_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw2g->cck_diff[path][cnt] & BIT(3))
+ pw2g->cck_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+ }
+ }
+		/* 5G base power indexes */
+ for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
+ pw5g->index_bw40_base[path][group] = hwinfo[addr++];
+ if (pw5g->index_bw40_base[path][group] == 0xFF)
+ pw5g->index_bw40_base[path][group] = 0xFE;
+ }
+ for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+ if (cnt == 0) {
+ pw5g->bw40_diff[path][cnt] = 0;
+
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw20_diff[path][cnt] = 0;
+ } else {
+ pw5g->bw20_diff[path][0] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw5g->bw20_diff[path][cnt] & BIT(3))
+ pw5g->bw20_diff[path][cnt] |=
+ 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->ofdm_diff[path][cnt] = 0x04;
+ } else {
+ pw5g->ofdm_diff[path][0] =
+ (hwinfo[addr] & 0x0f);
+ if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+ pw5g->ofdm_diff[path][cnt] |=
+ 0xF0;
+ }
+ addr++;
+ } else {
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw40_diff[path][cnt] = 0xFE;
+ } else {
+ pw5g->bw40_diff[path][cnt] =
+ (hwinfo[addr] & 0xf0) >> 4;
+ if (pw5g->bw40_diff[path][cnt] & BIT(3))
+ pw5g->bw40_diff[path][cnt] |= 0xF0;
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->bw20_diff[path][cnt] = 0xFE;
+ } else {
+ pw5g->bw20_diff[path][cnt] =
+ (hwinfo[addr] & 0x0f);
+ if (pw5g->bw20_diff[path][cnt] & BIT(3))
+ pw5g->bw20_diff[path][cnt] |= 0xF0;
+ }
+ addr++;
+ }
+ }
+ if (hwinfo[addr] == 0xFF) {
+ pw5g->ofdm_diff[path][1] = 0xFE;
+ pw5g->ofdm_diff[path][2] = 0xFE;
+ } else {
+ pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4;
+ pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f);
+ }
+ addr++;
+
+ if (hwinfo[addr] == 0xFF)
+ pw5g->ofdm_diff[path][3] = 0xFE;
+ else
+ pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f);
+ addr++;
+
+ for (cnt = 1; cnt < MAX_TX_COUNT; cnt++) {
+ if (pw5g->ofdm_diff[path][cnt] == 0xFF)
+ pw5g->ofdm_diff[path][cnt] = 0xFE;
+ else if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+ pw5g->ofdm_diff[path][cnt] |= 0xF0;
+ }
+ }
+}
+
+static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info_2g pw2g;
+ struct txpower_info_5g pw5g;
+ u8 rf_path, index;
+ u8 i;
+
+ _rtl8723be_read_power_value_fromprom(hw, &pw2g, &pw5g, autoload_fail,
+ hwinfo);
+
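+	/* Expand the per-group base indexes and per-rate-count diffs read
+	 * above into the per-channel tables consumed by the TX power code.
+	 */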
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl8723be_get_chnl_group(i+1);
+
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ pw2g.index_cck_base[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ pw2g.index_bw40_base[rf_path][index];
+ }
+ for (i = 0; i < MAX_TX_COUNT; i++) {
+ rtlefuse->txpwr_ht20diff[rf_path][i] =
+ pw2g.bw20_diff[rf_path][i];
+ rtlefuse->txpwr_ht40diff[rf_path][i] =
+ pw2g.bw40_diff[rf_path][i];
+ rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
+ pw2g.ofdm_diff[rf_path][i];
+ }
+ for (i = 0; i < 14; i++) {
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "RF(%d)-Ch(%d) [CCK / HT40_1S ] = "
+ "[0x%x / 0x%x ]\n", rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
+ }
+ }
+ if (!autoload_fail)
+ rtlefuse->eeprom_thermalmeter =
+ hwinfo[EEPROM_THERMAL_METER_88E];
+ else
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+
+ if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
+ rtlefuse->apk_thermalmeterignore = true;
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+ }
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+
+ if (!autoload_fail) {
+ rtlefuse->eeprom_regulatory =
+ hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/
+ if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+ rtlefuse->eeprom_regulatory = 0;
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+}
+
+static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
+ bool pseudo_test)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+ bool is_toshiba_smid1 = false;
+ bool is_toshiba_smid2 = false;
+ bool is_samsung_smid = false;
+ bool is_lenovo_smid = false;
+ u16 toshiba_smid1[] = {
+ 0x6151, 0x6152, 0x6154, 0x6155, 0x6177, 0x6178, 0x6179, 0x6180,
+ 0x7151, 0x7152, 0x7154, 0x7155, 0x7177, 0x7178, 0x7179, 0x7180,
+ 0x8151, 0x8152, 0x8154, 0x8155, 0x8181, 0x8182, 0x8184, 0x8185,
+ 0x9151, 0x9152, 0x9154, 0x9155, 0x9181, 0x9182, 0x9184, 0x9185
+ };
+ u16 toshiba_smid2[] = {
+ 0x6181, 0x6184, 0x6185, 0x7181, 0x7182, 0x7184, 0x7185, 0x8181,
+ 0x8182, 0x8184, 0x8185, 0x9181, 0x9182, 0x9184, 0x9185
+ };
+ u16 samsung_smid[] = {
+ 0x6191, 0x6192, 0x6193, 0x7191, 0x7192, 0x7193, 0x8191, 0x8192,
+ 0x8193, 0x9191, 0x9192, 0x9193
+ };
+ u16 lenovo_smid[] = {
+ 0x8195, 0x9195, 0x7194, 0x8200, 0x8201, 0x8202, 0x9199, 0x9200
+ };
+
+ if (pseudo_test) {
+ /* needs to be added */
+ return;
+ }
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "RTL819X not booted from eeprom, check it!!");
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8723BE_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtlefuse->autoload_failflag = false;
+ }
+ if (rtlefuse->autoload_failflag)
+ return;
+
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
+ rtlefuse->dev_addr);
+
+ /*parse xtal*/
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE];
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0x20;
+
+ _rtl8723be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
+ hwinfo);
+
+ rtl8723be_read_bt_coexist_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag,
+ hwinfo);
+
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+
+ /* set channel plan to world wide 13 */
+ rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ /* Does this one have a Toshiba SMID from group 1? */
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
+ if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
+ is_toshiba_smid1 = true;
+ break;
+ }
+ }
+ /* Does this one have a Toshiba SMID from group 2? */
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
+ if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
+ is_toshiba_smid2 = true;
+ break;
+ }
+ }
+ /* Does this one have a Samsung SMID? */
+		for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
+ if (rtlefuse->eeprom_smid == samsung_smid[i]) {
+ is_samsung_smid = true;
+ break;
+ }
+ }
+ /* Does this one have a Lenovo SMID? */
+		for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
+ if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
+ is_lenovo_smid = true;
+ break;
+ }
+ }
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ if (rtlefuse->eeprom_did == 0x8176) {
+ if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_toshiba_smid1) {
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ } else if (rtlefuse->eeprom_svid == 0x1025) {
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ } else if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_samsung_smid) {
+ rtlhal->oem_id = RT_CID_819X_SAMSUNG;
+ } else if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_lenovo_smid) {
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
+ } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8197) ||
+ (rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x9196)) {
+ rtlhal->oem_id = RT_CID_819X_CLEVO;
+ } else if ((rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x8194) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x8198) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x9197) ||
+ (rtlefuse->eeprom_svid == 0x1028 &&
+ rtlefuse->eeprom_smid == 0x9198)) {
+ rtlhal->oem_id = RT_CID_819X_DELL;
+ } else if ((rtlefuse->eeprom_svid == 0x103C &&
+ rtlefuse->eeprom_smid == 0x1629)) {
+ rtlhal->oem_id = RT_CID_819X_HP;
+ } else if ((rtlefuse->eeprom_svid == 0x1A32 &&
+ rtlefuse->eeprom_smid == 0x2315)) {
+ rtlhal->oem_id = RT_CID_819X_QMI;
+ } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8203)) {
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
+ } else if ((rtlefuse->eeprom_svid == 0x1043 &&
+ rtlefuse->eeprom_smid == 0x84B5)) {
+ rtlhal->oem_id = RT_CID_819X_EDIMAX_ASUS;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ } else if (rtlefuse->eeprom_did == 0x8178) {
+ if (rtlefuse->eeprom_svid == 0x10EC &&
+ is_toshiba_smid2)
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ else if (rtlefuse->eeprom_svid == 0x1025)
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ else if ((rtlefuse->eeprom_svid == 0x10EC &&
+ rtlefuse->eeprom_smid == 0x8186))
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
+ else if ((rtlefuse->eeprom_svid == 0x1043 &&
+ rtlefuse->eeprom_smid == 0x84B6))
+ rtlhal->oem_id =
+ RT_CID_819X_EDIMAX_ASUS;
+ else
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819X_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
+ }
+ }
+}
+
+static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ pcipriv->ledctl.led_opendrain = true;
+ switch (rtlhal->oem_id) {
+ case RT_CID_819X_HP:
+ pcipriv->ledctl.led_opendrain = true;
+ break;
+ case RT_CID_819X_LENOVO:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819X_ACER:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+}
+
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl8723be_read_chip_version(hw);
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtlefuse->autoload_failflag = false;
+ _rtl8723be_read_adapter_info(hw, false);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+ }
+ _rtl8723be_hal_customized_behavior(hw);
+}
+
+static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+ break;
+ }
+ if ((rtlpriv->btcoexist.bt_coexistence) &&
+ (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+ (rtlpriv->btcoexist.bt_cur_state) &&
+ (rtlpriv->btcoexist.bt_ant_isolation) &&
+ ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
+ (rtlpriv->btcoexist.bt_service == BT_BUSY)))
+ ratr_value &= 0x0fffcfc0;
+ else
+ ratr_value &= 0x0FFFFFFF;
+
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+
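+		/* Find the highest enabled HT rate in the bitmap and
+		 * replicate its index into all four nibbles of the
+		 * short-GI rate field.
+		 */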
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
+static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
+ u8 rate_index)
+{
+ u8 ret = 0;
+
+ switch (rate_index) {
+ case RATR_INX_WIRELESS_NGB:
+ ret = 1;
+ break;
+ case RATR_INX_WIRELESS_N:
+ case RATR_INX_WIRELESS_NG:
+ ret = 5;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ ret = 3;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ ret = 6;
+ break;
+ case RATR_INX_WIRELESS_G:
+ ret = 7;
+ break;
+ case RATR_INX_WIRELESS_B:
+ ret = 8;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = (sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
+ bool shortgi = false;
+ u8 rate_mask[7];
+ u8 macid = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ ratr_bitmap = sta->supp_rates[0];
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_A;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC ||
+ mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f8f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f8ff000;
+ else
+ ratr_bitmap &= 0x0f8ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f8f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f8ff000;
+ else
+ ratr_bitmap &= 0x0f8ff005;
+ }
+ }
+ }
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
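+	/* Build the 7-byte H2C RA-mask payload: byte 0 is the MAC ID,
+	 * byte 1 the ARFR id plus a short-GI flag in bit 7, byte 2 the
+	 * bandwidth, and bytes 3-6 the rate bitmap (little endian).
+	 */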
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
+ rate_mask[0] = macid;
+ rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) |
+ (shortgi ? 0x80 : 0x00);
+ rate_mask[2] = curtxbw_40mhz;
+ /* if (prox_priv->proxim_modeinfo->power_output > 0)
+ * rate_mask[2] |= BIT(6);
+ */
+
+ rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+ rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
+ rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+ rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]);
+ rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask);
+ _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (rtlpriv->dm.useramask)
+ rtl8723be_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ rtl8723be_update_hal_rate_table(hw, sta);
+}
+
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp;
+ bool actuallyset = false;
+
+ if (rtlpriv->rtlhal.being_init_adapter)
+ return false;
+
+ if (ppsc->swrf_processing)
+ return false;
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+ cur_rfstate = ppsc->rfpwr_state;
+
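+	/* Sample the hardware rfkill input: clear the direction bit for
+	 * GPIO pin 1 (presumably selecting input) and read the pin level;
+	 * polarity_ctl decides whether a high level means radio on or off.
+	 */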
+ rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+ rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+ if (rtlphy->polarity_ctl)
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+ else
+ e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+
+ if (ppsc->hwradiooff &&
+ (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
+
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if (!ppsc->hwradiooff &&
+ (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+ if (actuallyset) {
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
+
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+
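+	/* Default WEP keys go into fixed CAM entries addressed by the
+	 * pseudo MAC addresses below; group keys use the broadcast
+	 * address.
+	 */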
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 "switch case not processed\n");
+ enc_algo = CAM_TKIP;
+ break;
+ }
+
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw,
+ p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_SEC,
+ DBG_EMERG,
+						 "Cannot find a free hw security CAM entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
+ if (is_pairwise) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ }
+ }
+}
+
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool auto_load_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value;
+ u32 tmpu_32;
+
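+	/* BIT(18) of REG_MULTI_FUNC_CTRL appears to report whether the BT
+	 * function is present; RF_OPTION4 bit 0 gives the antenna count.
+	 */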
+ if (!auto_load_fail) {
+ tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+ if (tmpu_32 & BIT(18))
+ rtlpriv->btcoexist.btc_info.btcoexist = 1;
+ else
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ value = hwinfo[RF_OPTION4];
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ } else {
+ rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ }
+}
+
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* 0:Low, 1:High, 2:From Efuse. */
+ rtlpriv->btcoexist.reg_bt_iso = 2;
+ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+ rtlpriv->btcoexist.reg_bt_sco = 3;
+ /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+ rtlpriv->btcoexist.reg_bt_sco = 0;
+}
+
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+}
+
+void rtl8723be_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723be_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+ bool write_into_reg)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ if (allow_all_da) /* Set BIT0 */
+ rtlpci->receive_config |= RCR_AAP;
+ else /* Clear BIT0 */
+ rtlpci->receive_config &= ~RCR_AAP;
+
+ if (write_into_reg)
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+ RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ "receive_config = 0x%08X, write_into_reg =%d\n",
+ rtlpci->receive_config, write_into_reg);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
new file mode 100644
index 000000000000..b7449a9b57e4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_HW_H__
+#define __RTL8723BE_HW_H__
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl8723be_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_card_disable(struct ieee80211_hw *hw);
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo);
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_suspend(struct ieee80211_hw *hw);
+void rtl8723be_resume(struct ieee80211_hw *hw);
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+ bool write_into_reg);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
new file mode 100644
index 000000000000..cb931a38dc48
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
@@ -0,0 +1,153 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl8723be_init_led(struct ieee80211_hw *hw, struct rtl_led *pled,
+ enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->ledon = false;
+}
+
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+		 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 "switch case not processed\n");
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+		 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.led_opendrain) {
+ ledcfg &= 0x90; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
+ ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+ ledcfg &= 0xFE;
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+ } else {
+ ledcfg &= ~BIT(6);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5)));
+ }
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+ ledcfg &= 0x10; /* Set to software control. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not processed\n");
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ rtl8723be_sw_led_on(hw, pled0);
+ break;
+ case LED_CTL_POWER_OFF:
+ rtl8723be_sw_led_off(hw, pled0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ _rtl8723be_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
new file mode 100644
index 000000000000..c57de379ee8d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_LED_H__
+#define __RTL8723BE_LED_H__
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
new file mode 100644
index 000000000000..1575ef9ece9f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -0,0 +1,2156 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "../core.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage,
+ u8 *step, u32 *delay);
+static bool _rtl8723be_check_condition(struct ieee80211_hw *hw,
+ const u32 condition)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 _board = rtlefuse->board_type; /*need efuse define*/
+ u32 _interface = rtlhal->interface;
+ u32 _platform = 0x08;/*SupportPlatform */
+ u32 cond = condition;
+
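+	/* The condition word packs board type in bits [7:0], interface in
+	 * bits [15:8] and platform in bits [23:16]; the value 0xCDCDCDCD
+	 * marks an unconditional table entry.
+	 */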
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0xFF;
+ if ((_board & cond) == 0 && cond != 0x1F)
+ return false;
+
+ cond = condition & 0xFF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0xFF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
+ arraylength = RTL8723BEMAC_1T_ARRAYLEN;
+ ptrarray = RTL8723BEMAC_1T_ARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength);
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ return true;
+}
+
+static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = array_table[i];\
+ v2 = array_table[i+1]; \
+ } while (0)
+
+ int i;
+ u32 *array_table;
+ u16 arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 v1 = 0, v2 = 0;
+
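+	/* The register tables are flat arrays of (address, value) pairs.
+	 * A first value of 0xCDCDCDCD or above starts a conditional block
+	 * whose condition word is checked by _rtl8723be_check_condition();
+	 * data values of 0xDEAD/0xCDEF/0xCDCD terminate the block scan.
+	 */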
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ arraylen = RTL8723BEPHY_REG_1TARRAYLEN;
+ array_table = RTL8723BEPHY_REG_1TARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xcdcdcdcd) {
+ rtl_bb_delay(hw, v1, v2);
+			} else { /* this line starts a conditional branch */
+ if (!_rtl8723be_check_condition(hw, array_table[i])) {
+ /*Discard the following (offset, data) pairs*/
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+					i -= 2; /* compensate for the for loop's i += 2 */
+ /* Configure matched pairs and
+ * skip to end of if-else.
+ */
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ rtl_bb_delay(hw,
+ v1, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ arraylen = RTL8723BEAGCTAB_1TARRAYLEN;
+ array_table = RTL8723BEAGCTAB_1TARRAY;
+
+ for (i = 0; i < arraylen; i = i + 2) {
+ v1 = array_table[i];
+ v2 = array_table[i+1];
+ if (v1 < 0xCDCDCDCD) {
+ rtl_set_bbreg(hw, array_table[i],
+ MASKDWORD,
+ array_table[i + 1]);
+ udelay(1);
+ continue;
+			} else { /* this line starts a conditional branch */
+ if (!_rtl8723be_check_condition(hw, array_table[i])) {
+ /* Discard the following
+ * (offset, data) pairs
+ */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+					i -= 2; /* compensate for the for loop's i += 2 */
+ /*Configure matched pairs and
+ *skip to end of if-else.
+ */
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < arraylen - 2) {
+ rtl_set_bbreg(hw, array_table[i],
+ MASKDWORD,
+ array_table[i + 1]);
+ udelay(1);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+				 "The agctab_array_table[i] is %x, "
+				 "agctab_array_table[i + 1] is %x\n",
+ array_table[i], array_table[i + 1]);
+ }
+ }
+ return true;
+}
+
+static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
+{
+ u8 index = 0;
+
+ switch (regaddr) {
+ case RTXAGC_A_RATE18_06:
+ case RTXAGC_B_RATE18_06:
+ index = 0;
+ break;
+ case RTXAGC_A_RATE54_24:
+ case RTXAGC_B_RATE54_24:
+ index = 1;
+ break;
+ case RTXAGC_A_CCK1_MCS32:
+ case RTXAGC_B_CCK1_55_MCS32:
+ index = 2;
+ break;
+ case RTXAGC_B_CCK11_A_CCK2_11:
+ index = 3;
+ break;
+ case RTXAGC_A_MCS03_MCS00:
+ case RTXAGC_B_MCS03_MCS00:
+ index = 4;
+ break;
+ case RTXAGC_A_MCS07_MCS04:
+ case RTXAGC_B_MCS07_MCS04:
+ index = 5;
+ break;
+ case RTXAGC_A_MCS11_MCS08:
+ case RTXAGC_B_MCS11_MCS08:
+ index = 6;
+ break;
+ case RTXAGC_A_MCS15_MCS12:
+ case RTXAGC_B_MCS15_MCS12:
+ index = 7;
+ break;
+ default:
+ regaddr &= 0xFFF;
+ if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+ index = (u8) ((regaddr - 0xC20) / 4);
+ else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+ index = (u8) ((regaddr - 0xE20) / 4);
+ break;
+	}
+ return index;
+}
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
+
+ return readback_value;
+}
+
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = rtl8723_phy_rf_serial_read(hw, path,
+ regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+
+ rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
+}
+
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool rtstatus = _rtl8723be_phy_config_mac_with_headerfile(hw);
+
+ rtl_write_byte(rtlpriv, 0x04CA, 0x0B);
+ return rtstatus;
+}
+
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regval;
+ u8 reg_hwparafile = 1;
+ u32 tmp;
+	u8 crystalcap = rtlpriv->efuse.crystalcap;
+
+	rtl8723_phy_init_bb_rf_reg_def(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+ FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
+ FEN_BB_GLB_RSTN | FEN_BBRSTB);
+ tmp = rtl_read_dword(rtlpriv, 0x4c);
+ rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23));
+
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+
+ if (reg_hwparafile == 1)
+ rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw);
+
+ crystalcap = crystalcap & 0x3F;
+ rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+ (crystalcap | crystalcap << 6));
+
+ return rtstatus;
+}
+
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl8723be_phy_rf6052_config(hw);
+}
+
+static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr,
+ u32 data, enum radio_path rfpath,
+ u32 regaddr)
+{
+ if (addr == 0xfe || addr == 0xffe) {
+ mdelay(50);
+ } else {
+ rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+ udelay(1);
+ }
+}
+
+static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw,
+ u32 addr, u32 data)
+{
+ u32 content = 0x1000; /*RF Content: radio_a_txt*/
+ u32 maskforphyset = (u32)(content & 0xE000);
+
+ _rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A,
+ addr | maskforphyset);
+}
+
+static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u8 band, path, txnum, section;
+
+ for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+ for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path)
+ for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
+ for (section = 0;
+ section < TX_PWR_BY_RATE_NUM_SECTION;
+ ++section)
+ rtlphy->tx_power_by_rate_offset[band]
+ [path][txnum][section] = 0;
+}
+
+static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band,
+ u8 path, u8 rate_section,
+ u8 txnum, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (path > RF90_PATH_D) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+			 "Invalid Rf Path %d in phy_SetTxPowerByRateBase()\n",
+ path);
+ return;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+ break;
+ case OFDM:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+ break;
+ case HT_MCS0_MCS7:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+ break;
+ case HT_MCS8_MCS15:
+ rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+				 "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
+ break;
+		}
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
+ band);
+ }
+}
+
+static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path,
+ u8 txnum, u8 rate_section)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	u8 value = 0;
+
+	if (path > RF90_PATH_D) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
+ return 0;
+ }
+
+ if (band == BAND_ON_2_4G) {
+ switch (rate_section) {
+ case CCK:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+ break;
+ case OFDM:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+				 "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
+ break;
+		}
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
+ band);
+ }
+
+ return value;
+}
+
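+/* Derive the per-section base power values from the raw power-by-rate
+ * words.  Each byte holds a packed decimal value: the high nibble is the
+ * tens digit and the low nibble the units digit.
+ */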
+static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 raw_value = 0;
+ u8 base = 0, path = 0;
+
+ for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+ if (path == RF90_PATH_A) {
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+ [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK,
+ RF_1TX, base);
+ } else if (path == RF90_PATH_B) {
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+ [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+ CCK, RF_1TX, base);
+ }
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_1TX][1] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX,
+ base);
+
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_1TX][5] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7,
+ RF_1TX, base);
+
+ raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [path][RF_2TX][7] >> 24) & 0xFF;
+ base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+ phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+ HT_MCS8_MCS15, RF_2TX, base);
+ }
+}
+
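+/* Convert bytes [start, end] of a packed power-by-rate word from absolute
+ * values to offsets relative to base_val; the remaining bytes are copied
+ * through unchanged.
+ */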
+static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val)
+{
+	s8 i = 0;	/* must be signed: the loop counts down below zero */
+ u8 temp_value = 0;
+ u32 temp_data = 0;
+
+ for (i = 3; i >= 0; --i) {
+ if (i >= start && i <= end) {
+ /* Get the exact value */
+ temp_value = (u8) (*data >> (i * 8)) & 0xF;
+ temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10;
+
+ /* Change the value to a relative value */
+ temp_value = (temp_value > base_val) ?
+ temp_value - base_val :
+ base_val - temp_value;
+ } else {
+ temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+ }
+ temp_data <<= 8;
+ temp_data |= temp_value;
+ }
+ *data = temp_data;
+}
+
+static void conv_dbm_to_rel(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 base = 0, rfpath = RF90_PATH_A;
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, CCK);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][2]), 1, 1, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][3]), 1, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, OFDM);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][0]), 0, 3, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][1]), 0, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_1TX, HT_MCS0_MCS7);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][4]), 0, 3, base);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_1TX][5]), 0, 3, base);
+
+ base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+ RF_2TX, HT_MCS8_MCS15);
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_2TX][6]), 0, 3, base);
+
+ phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+ [rfpath][RF_2TX][7]), 0, 3, base);
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<=== conv_dbm_to_rel()\n");
+}
+
+static void _rtl8723be_phy_txpower_by_rate_configuration(
+ struct ieee80211_hw *hw)
+{
+ _rtl8723be_phy_store_txpower_by_rate_base(hw);
+ conv_dbm_to_rel(hw);
+}
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ bool rtstatus;
+
+ rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ return false;
+ }
+ _rtl8723be_phy_init_tx_power_by_rate(hw);
+ if (!rtlefuse->autoload_failflag) {
+ rtlphy->pwrgroup_cnt = 0;
+ rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ _rtl8723be_phy_txpower_by_rate_configuration(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ return false;
+ }
+ rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+ return false;
+ }
+ rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+
+static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
+ u32 band, u32 rfpath,
+ u32 txnum, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
+
+ if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid Band %d\n", band);
+ return;
+ }
+
+	if (rfpath >= TX_PWR_BY_RATE_NUM_RF) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
+ return;
+ }
+	if (txnum >= TX_PWR_BY_RATE_NUM_RF) {
+ RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+ "Invalid TxNum %d\n", txnum);
+ return;
+ }
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] =
+ data;
+}
+
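+/* The PG (power-by-rate) table is a flat array of 6-tuples:
+ * (band, rfpath, txnum, regaddr, bitmask, data).  A first word of
+ * 0xCDCDCDCD or above starts a conditional block, which is skipped in
+ * 3-word steps until a 0xDEAD marker when the condition does not match.
+ */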
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;
+
+ phy_regarray_pg_len = RTL8723BEPHY_REG_ARRAY_PGLEN;
+ phy_regarray_table_pg = RTL8723BEPHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 6) {
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ v4 = phy_regarray_table_pg[i+3];
+ v5 = phy_regarray_table_pg[i+4];
+ v6 = phy_regarray_table_pg[i+5];
+
+ if (v1 < 0xcdcdcdcd) {
+ if (phy_regarray_table_pg[i] == 0xfe ||
+ phy_regarray_table_pg[i] == 0xffe)
+ mdelay(50);
+ else
+ _rtl8723be_store_tx_power_by_rate(hw,
+ v1, v2, v3, v4, v5, v6);
+ continue;
+ } else {
+ /*don't need the hw_body*/
+ if (!_rtl8723be_check_condition(hw,
+ phy_regarray_table_pg[i])) {
+ i += 2; /* skip the pair of expression*/
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = phy_regarray_table_pg[i];
+ v2 = phy_regarray_table_pg[i+1];
+ v3 = phy_regarray_table_pg[i+2];
+ }
+ }
+ }
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
+ }
+ return true;
+}
+
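+/* The RADIOA table uses the same (offset, value) pair encoding as the
+ * baseband tables.  Offsets 0xfe and 0xffe are not register writes but
+ * 50 ms delay markers (see _rtl8723be_config_rf_reg()), and entries of
+ * 0xCDCDCDCD or above start conditional blocks.
+ */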
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ #define READ_NEXT_RF_PAIR(v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = radioa_array_table[i]; \
+ v2 = radioa_array_table[i+1]; \
+ } while (0)
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table;
+ u16 radioa_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 v1 = 0, v2 = 0;
+
+ radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN;
+ radioa_array_table = RTL8723BE_RADIOA_1TARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ v1 = radioa_array_table[i];
+ v2 = radioa_array_table[i+1];
+ if (v1 < 0xcdcdcdcd) {
+ _rtl8723be_config_rf_radio_a(hw, v1, v2);
+			} else { /* This entry starts a conditional block. */
+ if (!_rtl8723be_check_condition(hw,
+ radioa_array_table[i])) {
+ /* Discard the following
+ * (offset, data) pairs
+ */
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < radioa_arraylen - 2)
+ READ_NEXT_RF_PAIR(v1, v2, i);
+				i -= 2; /* compensate for the loop's i += 2 */
+ } else {
+ /* Configure matched pairs
+ * and skip to end of if-else.
+ */
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD &&
+ i < radioa_arraylen - 2) {
+ _rtl8723be_config_rf_radio_a(hw,
+ v1, v2);
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD &&
+ i < radioa_arraylen - 2) {
+ READ_NEXT_RF_PAIR(v1, v2, i);
+ }
+ }
+ }
+ }
+
+ if (rtlhal->oem_id == RT_CID_819X_HP)
+ _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD);
+
+ break;
+ case RF90_PATH_B:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ return true;
+}
+
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+		 "Default initial gain (c50 = 0x%x, c58 = 0x%x, c60 = 0x%x, c68 = 0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
+ MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
+ MASKDWORD);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
+ txpwr_dbm)
+ txpwr_dbm =
+ rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
+static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
+ u8 rate)
+{
+ u8 rate_section = 0;
+
+ switch (rate) {
+ case DESC92C_RATE1M:
+ rate_section = 2;
+ break;
+ case DESC92C_RATE2M:
+ case DESC92C_RATE5_5M:
+ if (path == RF90_PATH_A)
+ rate_section = 3;
+ else if (path == RF90_PATH_B)
+ rate_section = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_section = 3;
+ break;
+ case DESC92C_RATE6M:
+ case DESC92C_RATE9M:
+ case DESC92C_RATE12M:
+ case DESC92C_RATE18M:
+ rate_section = 0;
+ break;
+ case DESC92C_RATE24M:
+ case DESC92C_RATE36M:
+ case DESC92C_RATE48M:
+ case DESC92C_RATE54M:
+ rate_section = 1;
+ break;
+ case DESC92C_RATEMCS0:
+ case DESC92C_RATEMCS1:
+ case DESC92C_RATEMCS2:
+ case DESC92C_RATEMCS3:
+ rate_section = 4;
+ break;
+ case DESC92C_RATEMCS4:
+ case DESC92C_RATEMCS5:
+ case DESC92C_RATEMCS6:
+ case DESC92C_RATEMCS7:
+ rate_section = 5;
+ break;
+ case DESC92C_RATEMCS8:
+ case DESC92C_RATEMCS9:
+ case DESC92C_RATEMCS10:
+ case DESC92C_RATEMCS11:
+ rate_section = 6;
+ break;
+ case DESC92C_RATEMCS12:
+ case DESC92C_RATEMCS13:
+ case DESC92C_RATEMCS14:
+ case DESC92C_RATEMCS15:
+ rate_section = 7;
+ break;
+ default:
+		RT_ASSERT(false, "Rate_Section is Illegal\n");
+ break;
+ }
+ return rate_section;
+}
+
+static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
+ enum band_type band,
+ enum radio_path rfpath, u8 rate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 shift = 0, rate_section, tx_num;
+ char tx_pwr_diff = 0;
+
+ rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath,
+ rate);
+ tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+ if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+ if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15)
+ tx_num = RF_2TX;
+ else
+ tx_num = RF_1TX;
+ }
+
+ switch (rate) {
+ case DESC92C_RATE6M:
+ case DESC92C_RATE24M:
+ case DESC92C_RATEMCS0:
+ case DESC92C_RATEMCS4:
+ case DESC92C_RATEMCS8:
+ case DESC92C_RATEMCS12:
+ shift = 0;
+ break;
+ case DESC92C_RATE1M:
+ case DESC92C_RATE2M:
+ case DESC92C_RATE9M:
+ case DESC92C_RATE36M:
+ case DESC92C_RATEMCS1:
+ case DESC92C_RATEMCS5:
+ case DESC92C_RATEMCS9:
+ case DESC92C_RATEMCS13:
+ shift = 8;
+ break;
+ case DESC92C_RATE5_5M:
+ case DESC92C_RATE12M:
+ case DESC92C_RATE48M:
+ case DESC92C_RATEMCS2:
+ case DESC92C_RATEMCS6:
+ case DESC92C_RATEMCS10:
+ case DESC92C_RATEMCS14:
+ shift = 16;
+ break;
+ case DESC92C_RATE11M:
+ case DESC92C_RATE18M:
+ case DESC92C_RATE54M:
+ case DESC92C_RATEMCS3:
+ case DESC92C_RATEMCS7:
+ case DESC92C_RATEMCS11:
+ case DESC92C_RATEMCS15:
+ shift = 24;
+ break;
+ default:
+		RT_ASSERT(false, "Rate is Illegal\n");
+ break;
+ }
+ tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
+ [rate_section] >> shift) & 0xff;
+
+ return tx_pwr_diff;
+}
+
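+/* Compose the final power index for one rate: start from the efuse base
+ * level (CCK or HT40 1S), add the legacy-OFDM and HT20/HT40 diffs that
+ * apply, add the per-rate offset when eeprom_regulatory != 2, and clamp
+ * the result to MAX_POWER_INDEX.
+ */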
+static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+ u8 rate, u8 bandwidth, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+	u8 txpower = 0;
+ u8 power_diff_byrate = 0;
+
+ if (channel > 14 || channel < 1) {
+ index = 0;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Illegal channel!\n");
+ }
+ if (RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+ txpower = rtlefuse->txpwrlevel_cck[path][index];
+ else if (DESC92C_RATE6M <= rate)
+ txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+ else
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "invalid rate\n");
+
+ if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
+ !RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+ txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S];
+
+ if (bandwidth == HT_CHANNEL_WIDTH_20) {
+ if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht20diff[0][TX_1S];
+ if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht20diff[0][TX_2S];
+ } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht40diff[0][TX_1S];
+ if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+ txpower += rtlefuse->txpwr_ht40diff[0][TX_2S];
+ }
+ if (rtlefuse->eeprom_regulatory != 2)
+ power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw,
+ BAND_ON_2_4G,
+ path, rate);
+
+ txpower += power_diff_byrate;
+
+ if (txpower > MAX_POWER_INDEX)
+ txpower = MAX_POWER_INDEX;
+
+ return txpower;
+}
+
+static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
+ u8 power_index, u8 path, u8 rate)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (path == RF90_PATH_A) {
+ switch (rate) {
+ case DESC92C_RATE1M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_CCK1_MCS32,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE2M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE5_5M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE11M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATE6M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATE9M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE12M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE18M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATE24M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATE36M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATE48M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATE54M:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS0:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS1:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS2:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS3:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS4:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS5:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS6:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS7:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+ MASKBYTE3, power_index);
+ break;
+ case DESC92C_RATEMCS8:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE0, power_index);
+ break;
+ case DESC92C_RATEMCS9:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE1, power_index);
+ break;
+ case DESC92C_RATEMCS10:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE2, power_index);
+ break;
+ case DESC92C_RATEMCS11:
+ rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+ MASKBYTE3, power_index);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
+ break;
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ }
+}
+
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M,
+ DESC92C_RATE5_5M, DESC92C_RATE11M};
+ u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M,
+ DESC92C_RATE12M, DESC92C_RATE18M,
+ DESC92C_RATE24M, DESC92C_RATE36M,
+ DESC92C_RATE48M, DESC92C_RATE54M};
+ u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
+ DESC92C_RATEMCS2, DESC92C_RATEMCS3,
+ DESC92C_RATEMCS4, DESC92C_RATEMCS5,
+ DESC92C_RATEMCS6, DESC92C_RATEMCS7};
+ u8 i, size;
+ u8 power_index;
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+
+	size = ARRAY_SIZE(cck_rates);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ cck_rates[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ cck_rates[i]);
+ }
+	size = ARRAY_SIZE(ofdm_rates);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ ofdm_rates[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ ofdm_rates[i]);
+ }
+	size = ARRAY_SIZE(ht_rates_1t);
+ for (i = 0; i < size; i++) {
+ power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+ ht_rates_1t[i],
+ rtl_priv(hw)->phy.current_chan_bw,
+ channel);
+ _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+ ht_rates_1t[i]);
+ }
+}
+
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Unknown Scan Backup operation.\n");
+ break;
+ }
+ }
+}
+
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
+
+ if (is_hal_stop(rtlhal)) {
+ rtlphy->set_bwmode_inprogress = false;
+ return;
+ }
+
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ break;
+ }
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+ break;
+ }
+ rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+}
+
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8723be_phy_set_bw_mode_callback(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 "driver is sleeping or unloading\n");
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 delay;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
+ if (is_hal_stop(rtlhal))
+ return;
+ do {
+ if (!rtlphy->sw_chnl_inprogress)
+ break;
+ if (!rtl8723be_phy_sw_chn_step_by_step(hw,
+ rtlphy->current_channel,
+ &rtlphy->sw_chnl_stage,
+ &rtlphy->sw_chnl_step,
+ &delay)) {
+ if (delay > 0)
+ mdelay(delay);
+ else
+ continue;
+ } else {
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ break;
+ } while (true);
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+}
+
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+ RT_ASSERT((rtlphy->current_channel <= 14),
+		  "WIRELESS_MODE_G but channel > 14\n");
+ rtlphy->sw_chnl_inprogress = true;
+ rtlphy->sw_chnl_stage = 0;
+ rtlphy->sw_chnl_step = 0;
+ if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl8723be_phy_sw_chnl_callback(hw);
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+			 "sw_chnl_inprogress false, schedule workitem, current channel %d\n",
+ rtlphy->current_channel);
+ rtlphy->sw_chnl_inprogress = false;
+ } else {
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+			 "sw_chnl_inprogress false, driver sleep or unload\n");
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ return 1;
+}
+
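+/* Step the software channel-switch state machine.  Stage 0 runs the
+ * pre-common commands, stage 1 the RF-dependent commands (the actual
+ * RF_CHNLBW write) and stage 2 the post-common commands.  Returns true
+ * once every stage has finished; otherwise *delay is set to the number
+ * of ms to wait before the next call.
+ */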
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage,
+ u8 *step, u32 *delay)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+ u32 precommoncmdcnt;
+ struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+ u32 postcommoncmdcnt;
+ struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+ u32 rfdependcmdcnt;
+ struct swchnlcmd *currentcmd = NULL;
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+ precommoncmdcnt = 0;
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ 0, 0, 0);
+ rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+ postcommoncmdcnt = 0;
+ rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END,
+ 0, 0, 0);
+ rfdependcmdcnt = 0;
+
+ RT_ASSERT((channel >= 1 && channel <= 14),
+ "illegal channel for Zebra: %d\n", channel);
+
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CMDID_RF_WRITEREG,
+ RF_CHNLBW, channel, 10);
+
+ rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CMDID_END, 0, 0, 0);
+
+ do {
+ switch (*stage) {
+ case 0:
+ currentcmd = &precommoncmd[*step];
+ break;
+ case 1:
+ currentcmd = &rfdependcmd[*step];
+ break;
+ case 2:
+ currentcmd = &postcommoncmd[*step];
+ break;
+ }
+
+ if (currentcmd->cmdid == CMDID_END) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+
+ switch (currentcmd->cmdid) {
+ case CMDID_SET_TXPOWEROWER_LEVEL:
+ rtl8723be_phy_set_txpower_level(hw, channel);
+ break;
+ case CMDID_WRITEPORT_ULONG:
+ rtl_write_dword(rtlpriv, currentcmd->para1,
+ currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_USHORT:
+ rtl_write_word(rtlpriv, currentcmd->para1,
+ (u16) currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_UCHAR:
+ rtl_write_byte(rtlpriv, currentcmd->para1,
+ (u8) currentcmd->para2);
+ break;
+ case CMDID_RF_WRITEREG:
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] &
+ 0xfffffc00) | currentcmd->para2);
+
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ currentcmd->para1,
+ RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+
+ break;
+ } while (true);
+
+ (*delay) = currentcmd->msdelay;
+ (*step)++;
+ return false;
+}
+
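+/* Run one path A TX IQK attempt: program the IQK tone and trigger
+ * registers, wait IQK_DELAY_TIME ms and judge the outcome from the
+ * 0xeac/0xe94/0xe9c result registers.  Bit 0 of the return value is set
+ * when the calibration passed.
+ */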
+static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000);
+
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ return result;
+}
+
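+/* Compare two IQK result sets entry by entry.  Entries that differ by
+ * more than MAX_TOLERANCE set a bit in the similarity bitmap; for
+ * partially matching sets the usable half is copied into result[3].
+ */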
+static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8],
+ u8 c1, u8 c2)
+{
+ u32 i, j, diff, simularity_bitmap, bound;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 final_candidate[2] = { 0xFF, 0xFF };
+ bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ simularity_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ?
+ (result[c1][i] - result[c2][i]) :
+ (result[c2][i] - result[c1][i]);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simularity_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ simularity_bitmap |= (1 << i);
+ } else {
+ simularity_bitmap |= (1 << i);
+ }
+ }
+ }
+
+ if (simularity_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+ return bresult;
+ } else if (!(simularity_bitmap & 0x0F)) {
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else if (!(simularity_bitmap & 0xF0) && is2t) {
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else {
+ return false;
+ }
+}
+
+static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+ long result[][8], u8 t, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u8 patha_ok;
+ u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+
+ u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+ u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c,
+ 0x870, 0x860,
+ 0x864, 0x800
+ };
+ const u32 retrycount = 2;
+ u32 path_sel_bb, path_sel_rf;
+ u8 tmp_reg_c50, tmp_reg_c58;
+
+ tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0);
+ tmp_reg_c58 = rtl_get_bbreg(hw, 0xc58, MASKBYTE0);
+
+ if (t == 0) {
+ rtl8723_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl8723_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
+ if (t == 0) {
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ }
+ if (!rtlphy->rfpi_enable)
+ rtl8723_phy_pi_mode_switch(hw, true);
+
+ path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD);
+ path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff);
+
+ /*BB Setting*/
+ rtl_set_bbreg(hw, 0x800, BIT(24), 0x00);
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+ rtl_set_bbreg(hw, 0x870, BIT(10), 0x01);
+ rtl_set_bbreg(hw, 0x870, BIT(26), 0x01);
+ rtl_set_bbreg(hw, 0x860, BIT(10), 0x00);
+ rtl_set_bbreg(hw, 0x864, BIT(10), 0x00);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000);
+ rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800);
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t);
+ if (patha_ok == 0x01) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ }
+ }
+
+	if (patha_ok == 0)
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+			 "Path A IQK Failed!!\n");
+ if (is2t) {
+ rtl8723_phy_path_a_standby(hw);
+ rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+
+ if (t != 0) {
+ if (!rtlphy->rfpi_enable)
+ rtl8723_phy_pi_mode_switch(hw, false);
+ rtl8723_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+
+ rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf);
+
+ rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50);
+ rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50);
+ if (is2t) {
+ rtl_set_bbreg(hw, 0xc58, MASKBYTE0, 0x50);
+ rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_reg_c58);
+ }
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
+}
+
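+/* LC calibration: pause TX (or keep the current continuous-TX mode),
+ * park RF register 0x00 of the active path(s), kick the calibration
+ * through RF registers 0xb0/0x18, wait 100 ms and restore the saved
+ * state afterwards.
+ */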
+static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+ mdelay(100);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS, rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+}
+
+static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+ bool bmain, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+
+ if (is_hal_stop(rtlhal)) {
+ u8 u1btmp;
+ u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0);
+ rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ }
+ if (is2t) {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x1);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x2);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0);
+ rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201);
+
+ /* We use the RF definition of MAIN and AUX,
+		 * left antenna and right antenna respectively.
+ * Default output at AUX.
+ */
+ if (bmain) {
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(14) | BIT(13) | BIT(12), 0);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(4) | BIT(3), 0);
+ if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+ rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(14) | BIT(13) | BIT(12), 1);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(4) | BIT(3), 1);
+ if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+ rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1);
+ }
+ }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
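+/* Full IQK entry point.  With 'recovery' set only the saved baseband
+ * registers are reloaded; otherwise the calibration is run up to three
+ * times, a final candidate is chosen by comparing the result sets, and
+ * the IQK matrix is programmed from it.
+ */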
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ long result[4][8];
+ u8 i, final_candidate;
+ bool patha_ok, pathb_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+ reg_ecc, reg_tmp = 0;
+ bool is12simular, is13simular, is23simular;
+ u32 iqk_bb_reg[9] = {
+ ROFDM0_XARXIQIMBALANCE,
+ ROFDM0_XBRXIQIMBALANCE,
+ ROFDM0_ECCATHRESHOLD,
+ ROFDM0_AGCRSSITABLE,
+ ROFDM0_XATXIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
+ ROFDM0_XCTXAFE,
+ ROFDM0_XDTXAFE,
+ ROFDM0_RXIQEXTANTA
+ };
+
+ if (recovery) {
+ rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 9);
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ for (i = 0; i < 3; i++) {
+ if (get_rf_type(rtlphy) == RF_2T2R)
+ _rtl8723be_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl8723be_phy_iq_calibrate(hw, result, i, false);
+ if (i == 1) {
+ is12simular = phy_similarity_cmp(hw, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+ if (i == 2) {
+ is13simular = phy_similarity_cmp(hw, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+ is23simular = phy_similarity_cmp(hw, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+ if (final_candidate != 0xff) {
+ reg_e94 = result[final_candidate][0];
+ rtlphy->reg_e94 = reg_e94;
+ reg_e9c = result[final_candidate][1];
+ rtlphy->reg_e9c = reg_e9c;
+ reg_ea4 = result[final_candidate][2];
+ reg_eac = result[final_candidate][3];
+ reg_eb4 = result[final_candidate][4];
+ rtlphy->reg_eb4 = reg_eb4;
+ reg_ebc = result[final_candidate][5];
+ rtlphy->reg_ebc = reg_ebc;
+ reg_ec4 = result[final_candidate][6];
+ reg_ecc = result[final_candidate][7];
+ patha_ok = true;
+ pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = 0x100;
+ rtlphy->reg_eb4 = 0x100;
+ rtlphy->reg_e9c = 0x0;
+ rtlphy->reg_ebc = 0x0;
+ }
+ if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+ rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ if (final_candidate != 0xFF) {
+ for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+ rtlphy->iqk_matrix[0].value[0][i] =
+ result[final_candidate][i];
+ rtlphy->iqk_matrix[0].iqk_done = true;
+ }
+ rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
+}
+
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 timeout = 2000, timecount = 0;
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ "LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount);
+
+ _rtl8723be_phy_lc_calibrate(hw, false);
+
+ rtlphy->lck_inprogress = false;
+}
+
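+/* AP (amplitude/phase) calibration is not implemented for the 8723BE;
+ * this function is a no-op stub.
+ */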
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->apk_done)
+ return;
+
+ return;
+}
+
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ _rtl8723be_phy_set_rfpath_switch(hw, bmain, false);
+}
+
+static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ rtlpriv->dm_digtable.cur_igvalue =
+ rtlphy->initgain_backup.xaagccore1;
+ /*rtl92c_dm_write_dig(hw);*/
+ rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+ rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 =
+ rtlpriv->dm_digtable.cur_igvalue;
+ rtlpriv->dm_digtable.cur_igvalue = 0x17;
+ rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
+}
+
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool postprocessing = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
+ postprocessing = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ break;
+ }
+ } while (false);
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl8723be_phy_set_io(hw);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ return true;
+}
+
+static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl8723be_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
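+/* Move the RF into the requested power state.  ERFON either re-enables
+ * the whole NIC or simply restores the RF registers; ERFOFF and ERFSLEEP
+ * first wait (up to MAX_DOZE_WAITING_TIMES_9x polls) for the TX queues
+ * to drain before powering down or putting the RF to sleep.
+ */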
+static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ switch (rfpwr_state) {
+ case ERFON:
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 initialize_count = 0;
+ do {
+ initialize_count++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while (!rtstatus && (initialize_count < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+				 "Set ERFON, slept for %d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl8723be_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ break;
+ case ERFOFF:
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
+					 (i + 1), queue_id,
+					 skb_queue_len(&ring->queue));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "ERFSLEEP: %d times TcbBusyQueue[%d] = %d!\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
+ break;
+ }
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
+					 (i + 1), queue_id,
+					 skb_queue_len(&ring->queue));
+
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+					 "ERFSLEEP: %d times TcbBusyQueue[%d] = %d!\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+			 "Set ERFSLEEP, awake for %d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl8723be_phy_set_rf_sleep(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "switch case not process\n");
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ return bresult;
+}
+
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl8723be_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
new file mode 100644
index 000000000000..444ef95bb6af
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -0,0 +1,217 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PHY_H__
+#define __RTL8723BE_PHY_H__
+
+/* It must always be set to 4, otherwise the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT 4
+#define TX_1S 0
+#define TX_2S 1
+
+#define MAX_POWER_INDEX 0x3F
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 10
+#define index_mapping_NUM 15
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 1
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define ANTENNADIVERSITYVALUE 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define RESET_CNT_LIMIT 3
+
+#define IQK_MAC_REG_NUM 4
+
+#define RF6052_MAX_PATH 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL92C_MAX_PATH_NUM 2
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+
+enum _ANT_DIV_TYPE {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+};
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
+ u8 channel);
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+ bool b_recovery);
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
new file mode 100644
index 000000000000..b5167e73fecf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+
+/* Drivers should parse the arrays below and perform the corresponding actions. */
+/*3 Power on Array*/
+struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS
+ + RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8723B_card_disable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_CARDDIS
+ RTL8723B_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8723B_card_enable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_CARDDIS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_SUS
+ RTL8723B_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_SUS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_PDN
+ RTL8723B_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ /*FW behavior*/
+ RTL8723B_TRANS_ACT_TO_LPS
+ RTL8723B_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS] = {
+ /*FW behavior*/
+ RTL8723B_TRANS_LPS_TO_ACT
+ RTL8723B_TRANS_END
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
new file mode 100644
index 000000000000..a62f43ed8d32
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQ_H__
+#define __RTL8723BE_PWRSEQ_H__
+
+/* Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd
+ * There are 6 HW Power States:
+ * 0: POFF--Power Off
+ * 1: PDN--Power Down
+ * 2: CARDEMU--Card Emulation
+ * 3: ACT--Active Mode
+ * 4: LPS--Low Power State
+ * 5: SUS--Suspend
+ *
+ * The transitions between the different states are defined below
+ * TRANS_CARDEMU_TO_ACT
+ * TRANS_ACT_TO_CARDEMU
+ * TRANS_CARDEMU_TO_SUS
+ * TRANS_SUS_TO_CARDEMU
+ * TRANS_CARDEMU_TO_PDN
+ * TRANS_ACT_TO_LPS
+ * TRANS_LPS_TO_ACT
+ *
+ * TRANS_END
+ */
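For orientation, the state numbering in the comment above can be restated as a small enum. This is an illustration only and not part of the patch; neither the enum nor its member names exist in the driver.

/* Illustration only -- the six HW power states listed above, using the
 * same numbering as the comment. Names are hypothetical.
 */
enum rtl8723b_hw_power_state {
	RTL_HW_STATE_POFF    = 0,	/* Power Off */
	RTL_HW_STATE_PDN     = 1,	/* Power Down */
	RTL_HW_STATE_CARDEMU = 2,	/* Card Emulation */
	RTL_HW_STATE_ACT     = 3,	/* Active Mode */
	RTL_HW_STATE_LPS     = 4,	/* Low Power State */
	RTL_HW_STATE_SUS     = 5,	/* Suspend */
};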
+#define RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS 23
+#define RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8723B_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8723B_TRANS_END_STEPS 1
+
+#define RTL8723B_TRANS_CARDEMU_TO_ACT \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS}, \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0}, \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)}, \
+ {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)},
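Each brace-enclosed step above is a positional initializer for struct wlan_pwr_cfg (declared in pwrseqcmd.h later in this series): offset, cut mask, fab mask, interface mask, base address block, command, bit mask, value. As a hedged illustration (not part of the patch; the variable name example_step is mine), the polling step on register 0x0006 decodes as:

/* Illustration only: one step of RTL8723B_TRANS_CARDEMU_TO_ACT decoded
 * against the wlan_pwr_cfg field order from pwrseqcmd.h.
 */
static const struct wlan_pwr_cfg example_step = {
	0x0006,			/* offset within the selected block */
	PWR_CUT_ALL_MSK,	/* applies to every chip cut */
	PWR_FAB_ALL_MSK,	/* applies to every fab */
	PWR_INTF_ALL_MSK,	/* applies to SDIO, USB and PCIe */
	PWR_BASEADDR_MAC,	/* MAC register block */
	PWR_CMD_POLLING,	/* poll until (reg & msk) == (value & msk) */
	BIT(1),			/* msk */
	BIT(1),			/* value: wait for bit 1 to read back as 1 */
};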
+
+#define RTL8723B_TRANS_ACT_TO_CARDEMU \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \
+ {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0}, \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(5), BIT(5)}, \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(0), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_SUS \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_SUS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_PDN \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
+ PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, \
+ PWR_CMD_WRITE, 0xFF, 0x20}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},
+
+#define RTL8723B_TRANS_PDN_TO_CARDEMU \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},
+
+#define RTL8723B_TRANS_ACT_TO_LPS \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03}, \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00}, \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},
+
+#define RTL8723B_TRANS_LPS_TO_ACT \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
+ PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
+ PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_END \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \
+ PWR_CMD_END, 0, 0},
+
+extern struct wlan_pwr_cfg rtl8723B_power_on_flow
+ [RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_radio_off_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_disable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_enable_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_suspend_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_resume_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_hwpdn_flow
+ [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_enter_lps_flow
+ [RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_leave_lps_flow
+ [RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+ RTL8723B_TRANS_END_STEPS];
+
+/* RTL8723 Power Configuration CMDs for PCIe interface */
+#define RTL8723_NIC_PWR_ON_FLOW rtl8723B_power_on_flow
+#define RTL8723_NIC_RF_OFF_FLOW rtl8723B_radio_off_flow
+#define RTL8723_NIC_DISABLE_FLOW rtl8723B_card_disable_flow
+#define RTL8723_NIC_ENABLE_FLOW rtl8723B_card_enable_flow
+#define RTL8723_NIC_SUSPEND_FLOW rtl8723B_suspend_flow
+#define RTL8723_NIC_RESUME_FLOW rtl8723B_resume_flow
+#define RTL8723_NIC_PDN_FLOW rtl8723B_hwpdn_flow
+#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723B_enter_lps_flow
+#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723B_leave_lps_flow
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
new file mode 100644
index 000000000000..e4a507a756fb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+/* Description:
+ * This routine deals with parsing the Power Configuration CMDs
+ * for the RTL8723/RTL8188E series ICs.
+ * Assumption:
+ * We should follow the specific format released by HW SD.
+ *
+ * 2011.07.07, added by Roger.
+ */
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[])
+
+{
+ struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+ bool b_polling_bit = false;
+ u32 ary_idx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 polling_count = 0;
+ u32 max_polling_cnt = 5000;
+
+ do {
+ pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+ "interface_msk(%#x), base(%#x), "
+ "cmd(%#x), msk(%#x), value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwr_cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_BASE(pwr_cfg_cmd),
+ GET_PWR_CFG_CMD(pwr_cfg_cmd),
+ GET_PWR_CFG_MASK(pwr_cfg_cmd),
+ GET_PWR_CFG_VALUE(pwr_cfg_cmd));
+
+ if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) &&
+ (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) &&
+ (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) {
+ switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_READ\n");
+ break;
+ case PWR_CMD_WRITE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_WRITE\n");
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ /*Read the value from system register*/
+ value = rtl_read_byte(rtlpriv, offset);
+ value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
+ value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+ & GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+ /*Write the value back to system register*/
+ rtl_write_byte(rtlpriv, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_POLLING\n");
+ b_polling_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+ do {
+ value = rtl_read_byte(rtlpriv, offset);
+
+ value &= GET_PWR_CFG_MASK(pwr_cfg_cmd);
+ if (value ==
+ (GET_PWR_CFG_VALUE(pwr_cfg_cmd) &
+ GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+ b_polling_bit = true;
+ else
+ udelay(10);
+
+ if (polling_count++ > max_polling_cnt)
+ return false;
+
+ } while (!b_polling_bit);
+ break;
+ case PWR_CMD_DELAY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_DELAY\n");
+ if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) ==
+ PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ else
+ mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+ break;
+ case PWR_CMD_END:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "PWR_CMD_END\n");
+ return true;
+ break;
+ default:
+ RT_ASSERT(false,
+ "rtlbe_hal_pwrseqcmdparsing(): "
+ "Unknown CMD!!\n");
+ break;
+ }
+ }
+
+ ary_idx++;
+ } while (1);
+
+ return true;
+}
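The parsing loop above reduces to table-driven read-modify-write, poll and delay steps. A minimal standalone sketch of the same control flow, with register I/O replaced by an in-memory array and the cut/fab/interface mask checks, tracing, delays and retry limit left out, might look as follows. All demo_/fake_ names are assumptions for this illustration only, not driver code.

/* Standalone illustration (not driver code): walks a power-sequence
 * table the same way rtlbe_hal_pwrseqcmdparsing() does, but the
 * "hardware" is an in-memory byte array.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(x)		(1U << (x))
#define PWR_CMD_WRITE	0x01
#define PWR_CMD_POLLING	0x02
#define PWR_CMD_END	0x04

struct demo_pwr_cfg {
	uint16_t offset;
	uint8_t cmd;
	uint8_t msk;
	uint8_t value;
};

static uint8_t fake_regs[0x100];	/* stand-in for the MAC registers */

static bool demo_parse(const struct demo_pwr_cfg *tbl)
{
	for (size_t i = 0; ; i++) {
		const struct demo_pwr_cfg *c = &tbl[i];
		uint8_t v;

		switch (c->cmd) {
		case PWR_CMD_WRITE:
			/* read-modify-write, as in the driver loop */
			v = fake_regs[c->offset];
			v = (v & ~c->msk) | (c->value & c->msk);
			fake_regs[c->offset] = v;
			break;
		case PWR_CMD_POLLING:
			/* the real routine retries with udelay(10) up to
			 * max_polling_cnt; one check is enough here since
			 * the previous write already set the bit.
			 */
			if ((fake_regs[c->offset] & c->msk) !=
			    (c->value & c->msk))
				return false;
			break;
		case PWR_CMD_END:
			return true;
		}
	}
}

int main(void)
{
	static const struct demo_pwr_cfg flow[] = {
		{ 0x0005, PWR_CMD_WRITE,   BIT(0), BIT(0) },
		{ 0x0005, PWR_CMD_POLLING, BIT(0), BIT(0) },
		{ 0xFFFF, PWR_CMD_END,     0,      0      },
	};

	printf("flow %s\n", demo_parse(flow) ? "completed" : "failed");
	return 0;
}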
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
new file mode 100644
index 000000000000..ce14a3b5cb71
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQCMD_H__
+#define __RTL8723BE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/*The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+#define PWR_CMD_WRITE 0x01
+#define PWR_CMD_POLLING 0x02
+#define PWR_CMD_DELAY 0x03
+#define PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_delay_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+ u8 fab_version, u8 interface_type,
+ struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
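As a usage note (illustration only; the variable name end_step is mine): the GET_PWR_CFG_* accessors above expand to plain field accesses on a struct wlan_pwr_cfg object, e.g. for the terminator entry that RTL8723B_TRANS_END appends to every flow:

/* Illustration only: the accessors applied to the common terminator. */
struct wlan_pwr_cfg end_step = {
	0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
	0, PWR_CMD_END, 0, 0
};
/* GET_PWR_CFG_OFFSET(end_step) == 0xFFFF
 * GET_PWR_CFG_CMD(end_step)    == PWR_CMD_END, which is what makes
 * rtlbe_hal_pwrseqcmdparsing() stop walking the table.
 */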
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
new file mode 100644
index 000000000000..4c653fab8795
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
@@ -0,0 +1,2277 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_REG_H__
+#define __RTL8723BE_REG_H__
+
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+/* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_LDO_CTRL 0x0027
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060
+#define REG_GPIO_IO_SEL_2 0x0062
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_AFE_XTAL_CTRL_EXT 0x0078
+#define REG_XCK_OUT_CTRL 0x007c
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081
+#define REG_MCUTSTCFG 0x0084
+
+
+#define REG_HIMR 0x00B0
+#define REG_HISR 0x00B4
+#define REG_HIMRE 0x00B8
+#define REG_HISRE 0x00BC
+
+#define REG_EFUSE_ACCESS 0x00CF
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4
+#define REG_SYS_CFG1 0x00F0
+#define REG_ROM_VERSION 0x00FD
+
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2)
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+#define REG_RXDMA_AGG_PG_TH 0x0280
+/* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_FW_UPD_RDPTR 0x0284
+/* Control the RX DMA.*/
+#define REG_RXDMA_CONTROL 0x0286
+/* The number of packets in RXPKTBUF. */
+#define REG_RXPKT_NUM 0x0287
+
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+
+#define REG_DBI 0x0348
+#define REG_MDIO 0x0354
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_WATCH_DOG 0x0368
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+
+#define REG_HDAQ_DESA_NODEF 0x0000
+#define REG_CMDQ_DESA_NODEF 0x0000
+
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AMPDU_MAX_TIME 0x0456
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x04D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0
+#define REG_DUMMY 0x04FC
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_SECONDARY_CCA_CTRL 0x0577
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_SCH_TXCMD 0x05D0
+
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_TRXPTCL_CTL 0x0668
+
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_NUM 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60
+#define REG_TEST_SIE_PID 0xFE62
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66
+#define REG_TEST_SIE_MAC_ADDR 0xFE70
+#define REG_TEST_SIE_STRING 0xFE80
+
+#define REG_NORMAL_SIE_VID 0xFE60
+#define REG_NORMAL_SIE_PID 0xFE62
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65
+#define REG_NORMAL_SIE_PHY 0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
+#define REG_NORMAL_SIE_STRING 0xFE80
+
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+#define UNUSED_REGISTER 0x1BF
+#define DCAM UNUSED_REGISTER
+#define PSR UNUSED_REGISTER
+#define BBADDR UNUSED_REGISTER
+#define PHYDATAR UNUSED_REGISTER
+
+#define INVALID_BBRF_VALUE 0x12345678
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define CMDEEPROM_EN BIT(5)
+#define CMDEEPROM_SEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
+#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL)
+#define AUTOLOAD_EFUSE CMDEEPROM_EN
+
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT(5)
+
+#define GPIO_IN REG_GPIO_PIN_CTRL
+#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1)
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
+#define HSIMR_GPIO12_0_INT_EN BIT(0)
+#define HSIMR_SPS_OCP_INT_EN BIT(5)
+#define HSIMR_RON_INT_EN BIT(6)
+#define HSIMR_PDN_INT_EN BIT(7)
+#define HSIMR_GPIO9_INT_EN BIT(25)
+
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
+
+#define HSISR_GPIO12_0_INT BIT(0)
+#define HSISR_SPS_OCP_INT BIT(5)
+#define HSISR_RON_INT_EN BIT(6)
+#define HSISR_PDNINT BIT(7)
+#define HSISR_GPIO9_INT BIT(25)
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
+
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
+ RATR_MCS14 | RATR_MCS15)
+
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+#define SCR_USEDK 0x01
+#define SCR_TXSEC_ENABLE 0x02
+#define SCR_RXSEC_ENABLE 0x04
+
+#define WOW_PMEN BIT(0)
+#define WOW_WOMEN BIT(1)
+#define WOW_MAGIC BIT(2)
+#define WOW_UWF BIT(3)
+
+/*********************************************
+* 8723BE IMR/ISR bits
+**********************************************/
+#define IMR_DISABLED 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK BIT(30) /* TXRPT interrupt when
+ * CCX bit of the packet is set
+ */
+#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */
+#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires,
+ * this bit is set to 1
+ */
+#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires,
+ * this bit is set to 1
+ */
+#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle
+ * indication interrupt
+ */
+#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */
+#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is
+ * true, this bit is set to 1)
+ */
+#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt
+ * Extension for Win7
+ */
+#define IMR_ATIMEND BIT(12) /* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is
+ * true, this bit is set to 1)
+ */
+#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status,
+ * Write 1 clear
+ */
+#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status,
+ * Write 1 clear
+ */
+#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status,
+ * Write 1 clear
+ */
+#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR_ROK BIT(0) /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status,
+ * write 1 clear.
+ */
+#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status,
+ * Write 1 clear
+ */
+#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */
+
+#define HWSET_MAX_SIZE 512
+#define EFUSE_MAX_SECTION 64
+#define EFUSE_REAL_CONTENT_LEN 256
+#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data excluding the header,
+ * 7 dummy bytes from CP test
+ * and 1 reserved byte.
+ */
+
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_BOARDTYPE 0x02
+#define EEPROM_DEFAULT_TXPOWER 0x1010
+#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
+
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_THERMALMETER 0x18
+#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+#define EEPROM_DEFAULT_HT20_DIFF 2
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0xC3
+
+#define EEPROM_DEFAULT_PID 0x1234
+#define EEPROM_DEFAULT_VID 0x5678
+#define EEPROM_DEFAULT_CUSTOMERID 0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
+#define EEPROM_DEFAULT_VERSION 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+#define RTL8723BE_EEPROM_ID 0x8129
+
+#define EEPROM_HPON 0x02
+#define EEPROM_CLK 0x06
+#define EEPROM_TESTR 0x08
+
+
+#define EEPROM_TXPOWERCCK 0x10
+#define EEPROM_TXPOWERHT40_1S 0x16
+#define EEPROM_TXPOWERHT20DIFF 0x1B
+#define EEPROM_TXPOWER_OFDMDIFF 0x1B
+
+
+
+#define EEPROM_TX_PWR_INX 0x10
+
+#define EEPROM_CHANNELPLAN 0xB8
+#define EEPROM_XTAL_8723BE 0xB9
+#define EEPROM_THERMAL_METER_88E 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION_88E 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING_88E 0xC3
+#define EEPROM_VERSION 0xC4
+#define EEPROM_CUSTOMER_ID 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+#define EEPROM_MAC_ADDR 0xD0
+#define EEPROM_VID 0xD6
+#define EEPROM_DID 0xD8
+#define EEPROM_SVID 0xDA
+#define EEPROM_SMID 0xDC
+
+#define STOPBECON BIT(6)
+#define STOPHIGHT BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
+
+#define RCR_APPFCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_CBSSID RCR_CBSSID_DATA
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define SW18_FPWM BIT(3)
+
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTN BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define ENPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+#define RSM_EN BIT(0)
+#define TIMER_EN BIT(4)
+
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define ENBT BIT(5)
+#define ENUART BIT(8)
+#define UART_910 BIT(9)
+#define ENPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define ENSIC BIT(12)
+#define SIC_23 BIT(13)
+#define ENHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+#define LED0PL BIT(4)
+#define LED1PL BIT(12)
+#define LED0DIS BIT(7)
+
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000
+#define CHIP_VER_RTL_SHIFT 12
+
+#define REG_LBMODE (REG_CR + 3)
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
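+/* LLT command word layout (from the macros below): init data in bits 7:0,
+ * entry address in bits 15:8, operation code (_LLT_*_ACCESS) in bits 31:30.
+ */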
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+#define DROP_DATA_EN BIT(9)
+
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+#define USE_SHORT_G1 BIT(20)
+
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) ((((x) & 0xF)) << 8)
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+
+#define TSFTR_RST BIT(0)
+#define TSFTR1_RST BIT(1)
+
+#define STOP_BCNQ BIT(6)
+
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+#define ACMHW_HWEN BIT(0)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
+#define ACMHW_BEQSTATUS BIT(4)
+#define ACMHW_VIQSTATUS BIT(5)
+#define ACMHW_VOQSTATUS BIT(6)
+
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define BW_20MHZ BIT(2)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define ENMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_FALSE_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_FALSE_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_FALSE_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
+#define SCR_TXUSEDK BIT(0)
+#define SCR_RXUSEDK BIT(1)
+#define SCR_TXENCENABLE BIT(2)
+#define SCR_RXDECENABLE BIT(3)
+#define SCR_SKBYA2 BIT(4)
+#define SCR_NOSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+#define USB_AGG_EN BIT(3)
+
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 175 /* 255 on the 88e */
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 3000
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 0x3
+#define EPROM_CMD_LOAD 1
+
+#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+#define RPMAC_RESET 0x100
+#define RPMAC_TXSTART 0x104
+#define RPMAC_TXLEGACYSIG 0x108
+#define RPMAC_TXHTSIG1 0x10c
+#define RPMAC_TXHTSIG2 0x110
+#define RPMAC_PHYDEBUG 0x114
+#define RPMAC_TXPACKETNUM 0x118
+#define RPMAC_TXIDLE 0x11c
+#define RPMAC_TXMACHEADER0 0x120
+#define RPMAC_TXMACHEADER1 0x124
+#define RPMAC_TXMACHEADER2 0x128
+#define RPMAC_TXMACHEADER3 0x12c
+#define RPMAC_TXMACHEADER4 0x130
+#define RPMAC_TXMACHEADER5 0x134
+#define RPMAC_TXDADATYPE 0x138
+#define RPMAC_TXRANDOMSEED 0x13c
+#define RPMAC_CCKPLCPPREAMBLE 0x140
+#define RPMAC_CCKPLCPHEADER 0x144
+#define RPMAC_CCKCRC16 0x148
+#define RPMAC_OFDMRXCRC32OK 0x170
+#define RPMAC_OFDMRXCRC32ER 0x174
+#define RPMAC_OFDMRXPARITYER 0x178
+#define RPMAC_OFDMRXCRC8ER 0x17c
+#define RPMAC_CCKCRXRC16ER 0x180
+#define RPMAC_CCKCRXRC32ER 0x184
+#define RPMAC_CCKCRXRC32OK 0x188
+#define RPMAC_TXSTATUS 0x18c
+
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define RFPGA0_XAB_RFPARAMETER 0x878
+#define RFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+#define RFPGA0_XD_LSSIREADBACK 0x8ac
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVEA_HSPI_READBACK 0x8b8
+#define TRANSCEIVEB_HSPI_READBACK 0x8bc
+#define REG_SC_CNT 0x8c4
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+#define RCCK0_SYSTEM 0xa00
+
+#define RCCK0_AFESETTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+#define RCCK0_CCA_CNT 0xa60
+
+
+/* PageB(0xB00) */
+#define RPDP_ANTA 0xb00
+#define RPDP_ANTA_4 0xb04
+#define RPDP_ANTA_8 0xb08
+#define RPDP_ANTA_C 0xb0c
+#define RPDP_ANTA_10 0xb10
+#define RPDP_ANTA_14 0xb14
+#define RPDP_ANTA_18 0xb18
+#define RPDP_ANTA_1C 0xb1c
+#define RPDP_ANTA_20 0xb20
+#define RPDP_ANTA_24 0xb24
+
+#define RCONFIG_PMPD_ANTA 0xb28
+#define CONFIG_RAM64X16 0xb2c
+
+#define RBNDA 0xb30
+#define RHSSIPAR 0xb34
+
+#define RCONFIG_ANTA 0xb68
+#define RCONFIG_ANTB 0xb6c
+
+#define RPDP_ANTB 0xb70
+#define RPDP_ANTB_4 0xb74
+#define RPDP_ANTB_8 0xb78
+#define RPDP_ANTB_C 0xb7c
+#define RPDP_ANTB_10 0xb80
+#define RPDP_ANTB_14 0xb84
+#define RPDP_ANTB_18 0xb88
+#define RPDP_ANTB_1C 0xb8c
+#define RPDP_ANTB_20 0xb90
+#define RPDP_ANTB_24 0xb94
+
+#define RCONFIG_PMPD_ANTB 0xb98
+
+#define RBNDB 0xba0
+
+#define RAPK 0xbd8
+#define RPM_RX0_ANTA 0xbdc
+#define RPM_RX1_ANTA 0xbe0
+#define RPM_RX2_ANTA 0xbe4
+#define RPM_RX3_ANTA 0xbe8
+#define RPM_RX0_ANTB 0xbec
+#define RPM_RX1_ANTB 0xbf0
+#define RPM_RX2_ANTB 0xbf4
+#define RPM_RX3_ANTB 0xbf8
+
+/*Page C*/
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBANLANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
+
+#define ROFDM0_RXIQEXTANTA 0xca0
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+
+
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CF0 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCF0AB 0xdbc
+#define ROFDM_TAILCF0CD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGREPORT 0xddc
+
+#define RTXAGC_A_RATE18_06 0xe00
+#define RTXAGC_A_RATE54_24 0xe04
+#define RTXAGC_A_CCK1_MCS32 0xe08
+#define RTXAGC_A_MCS03_MCS00 0xe10
+#define RTXAGC_A_MCS07_MCS04 0xe14
+#define RTXAGC_A_MCS11_MCS08 0xe18
+#define RTXAGC_A_MCS15_MCS12 0xe1c
+
+#define RTXAGC_B_RATE18_06 0x830
+#define RTXAGC_B_RATE54_24 0x834
+#define RTXAGC_B_CCK1_55_MCS32 0x838
+#define RTXAGC_B_MCS03_MCS00 0x83c
+#define RTXAGC_B_MCS07_MCS04 0x848
+#define RTXAGC_B_MCS11_MCS08 0x84c
+#define RTXAGC_B_MCS15_MCS12 0x868
+#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define RFPGA0_IQK 0xe28
+#define RTX_IQK_TONE_A 0xe30
+#define RRX_IQK_TONE_A 0xe34
+#define RTX_IQK_PI_A 0xe38
+#define RRX_IQK_PI_A 0xe3c
+
+#define RTX_IQK 0xe40
+#define RRX_IQK 0xe44
+#define RIQK_AGC_PTS 0xe48
+#define RIQK_AGC_RSP 0xe4c
+#define RTX_IQK_TONE_B 0xe50
+#define RRX_IQK_TONE_B 0xe54
+#define RTX_IQK_PI_B 0xe58
+#define RRX_IQK_PI_B 0xe5c
+#define RIQK_AGC_CONT 0xe60
+
+#define RBLUE_TOOTH 0xe6c
+#define RRX_WAIT_CCA 0xe70
+#define RTX_CCK_RFON 0xe74
+#define RTX_CCK_BBON 0xe78
+#define RTX_OFDM_RFON 0xe7c
+#define RTX_OFDM_BBON 0xe80
+#define RTX_TO_RX 0xe84
+#define RTX_TO_TX 0xe88
+#define RRX_CCK 0xe8c
+
+#define RTX_POWER_BEFORE_IQK_A 0xe94
+#define RTX_POWER_AFTER_IQK_A 0xe9c
+
+#define RRX_POWER_BEFORE_IQK_A 0xea0
+#define RRX_POWER_BEFORE_IQK_A_2 0xea4
+#define RRX_POWER_AFTER_IQK_A 0xea8
+#define RRX_POWER_AFTER_IQK_A_2 0xeac
+
+#define RTX_POWER_BEFORE_IQK_B 0xeb4
+#define RTX_POWER_AFTER_IQK_B 0xebc
+
+#define RRX_POWER_BEFORE_IQK_B 0xec0
+#define RRX_POWER_BEFORE_IQK_B_2 0xec4
+#define RRX_POWER_AFTER_IQK_B 0xec8
+#define RRX_POWER_AFTER_IQK_B_2 0xecc
+
+#define RRX_OFDM 0xed0
+#define RRX_WAIT_RIFS 0xed4
+#define RRX_TO_RX 0xed8
+#define RSTANDBY 0xedc
+#define RSLEEP 0xee0
+#define RPMPD_ANAEN 0xeec
+
+#define RZEBRA1_HSSIENABLE 0x0
+#define RZEBRA1_TRXENABLE1 0x1
+#define RZEBRA1_TRXENABLE2 0x2
+#define RZEBRA1_AGC 0x4
+#define RZEBRA1_CHARGEPUMP 0x5
+#define RZEBRA1_CHANNEL 0x7
+
+#define RZEBRA1_TXGAIN 0x8
+#define RZEBRA1_TXLPF 0x9
+#define RZEBRA1_RXLPF 0xb
+#define RZEBRA1_RXHPFCORNER 0xc
+
+#define RGLOBALCTRL 0
+#define RRTL8256_TXLPF 19
+#define RRTL8256_RXLPF 11
+#define RRTL8258_TXLPF 0x11
+#define RRTL8258_RXLPF 0x13
+#define RRTL8258_RSSILPF 0xa
+
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define RRFCHANNEL 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x42
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+
+#define RF_WE_LUT 0xEF
+
+#define BBBRESETB 0x100
+#define BGLOBALRESETB 0x200
+#define BOFDMTXSTART 0x4
+#define BCCKTXSTART 0x8
+#define BCRC32DEBUG 0x100
+#define BPMACLOOPBACK 0x10
+#define BTXLSIG 0xffffff
+#define BOFDMTXRATE 0xf
+#define BOFDMTXRESERVED 0x10
+#define BOFDMTXLENGTH 0x1ffe0
+#define BOFDMTXPARITY 0x20000
+#define BTXHTSIG1 0xffffff
+#define BTXHTMCSRATE 0x7f
+#define BTXHTBW 0x80
+#define BTXHTLENGTH 0xffff00
+#define BTXHTSIG2 0xffffff
+#define BTXHTSMOOTHING 0x1
+#define BTXHTSOUNDING 0x2
+#define BTXHTRESERVED 0x4
+#define BTXHTAGGREATION 0x8
+#define BTXHTSTBC 0x30
+#define BTXHTADVANCECODING 0x40
+#define BTXHTSHORTGI 0x80
+#define BTXHTNUMBERHT_LTF 0x300
+#define BTXHTCRC8 0x3fc00
+#define BCOUNTERRESET 0x10000
+#define BNUMOFOFDMTX 0xffff
+#define BNUMOFCCKTX 0xffff0000
+#define BTXIDLEINTERVAL 0xffff
+#define BOFDMSERVICE 0xffff0000
+#define BTXMACHEADER 0xffffffff
+#define BTXDATAINIT 0xff
+#define BTXHTMODE 0x100
+#define BTXDATATYPE 0x30000
+#define BTXRANDOMSEED 0xffffffff
+#define BCCKTXPREAMBLE 0x1
+#define BCCKTXSFD 0xffff0000
+#define BCCKTXSIG 0xff
+#define BCCKTXSERVICE 0xff00
+#define BCCKLENGTHEXT 0x8000
+#define BCCKTXLENGHT 0xffff0000
+#define BCCKTXCRC16 0xffff
+#define BCCKTXSTATUS 0x1
+#define BOFDMTXSTATUS 0x2
+#define IS_BB_REG_OFFSET_92S(_offset) \
+ ((_offset >= 0x800) && (_offset <= 0xfff))
+
+#define BRFMOD 0x1
+#define BJAPANMODE 0x2
+#define BCCKTXSC 0x30
+#define BCCKEN 0x1000000
+#define BOFDMEN 0x2000000
+
+#define BOFDMRXADCPHASE 0x10000
+#define BOFDMTXDACPHASE 0x40000
+#define BXATXAGC 0x3f
+
+#define BXBTXAGC 0xf00
+#define BXCTXAGC 0xf000
+#define BXDTXAGC 0xf0000
+
+#define BPASTART 0xf0000000
+#define BTRSTART 0x00f00000
+#define BRFSTART 0x0000f000
+#define BBBSTART 0x000000f0
+#define BBBCCKSTART 0x0000000f
+#define BPAEND 0xf
+#define BTREND 0x0f000000
+#define BRFEND 0x000f0000
+#define BCCAMASK 0x000000f0
+#define BR2RCCAMASK 0x00000f00
+#define BHSSI_R2TDELAY 0xf8000000
+#define BHSSI_T2RDELAY 0xf80000
+#define BCONTXHSSI 0x400
+#define BIGFROMCCK 0x200
+#define BAGCADDRESS 0x3f
+#define BRXHPTX 0x7000
+#define BRXHP2RX 0x38000
+#define BRXHPCCKINI 0xc0000
+#define BAGCTXCODE 0xc00000
+#define BAGCRXCODE 0x300000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDREAALENGTH 0x400
+
+#define B3WIRERFPOWERDOWN 0x1
+#define B5GPAPEPOLARITY 0x40000000
+#define B2GPAPEPOLARITY 0x80000000
+#define BRFSW_TXDEFAULTANT 0x3
+#define BRFSW_TXOPTIONANT 0x30
+#define BRFSW_RXDEFAULTANT 0x300
+#define BRFSW_RXOPTIONANT 0x3000
+#define BRFSI_3WIREDATA 0x1
+#define BRFSI_3WIRECLOCK 0x2
+#define BRFSI_3WIRELOAD 0x4
+#define BRFSI_3WIRERW 0x8
+#define BRFSI_3WIRE 0xf
+
+#define BRFSI_RFENV 0x10
+
+#define BRFSI_TRSW 0x20
+#define BRFSI_TRSWB 0x40
+#define BRFSI_ANTSW 0x100
+#define BRFSI_ANTSWB 0x200
+#define BRFSI_PAPE 0x400
+#define BRFSI_PAPE5G 0x800
+#define BBANDSELECT 0x1
+#define BHTSIG2_GI 0x80
+#define BHTSIG2_SMOOTHING 0x01
+#define BHTSIG2_SOUNDING 0x02
+#define BHTSIG2_AGGREATON 0x08
+#define BHTSIG2_STBC 0x30
+#define BHTSIG2_ADVCODING 0x40
+#define BHTSIG2_NUMOFHTLTF 0x300
+#define BHTSIG2_CRC8 0x3fc
+#define BHTSIG1_MCS 0x7f
+#define BHTSIG1_BANDWIDTH 0x80
+#define BHTSIG1_HTLENGTH 0xffff
+#define BLSIG_RATE 0xf
+#define BLSIG_RESERVED 0x10
+#define BLSIG_LENGTH 0x1fffe
+#define BLSIG_PARITY 0x20
+#define BCCKRXPHASE 0x4
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+
+#define BLSSIREADBACKDATA 0xfffff
+
+#define BLSSIREADOKFLAG 0x1000
+#define BCCKSAMPLERATE 0x8
+#define BREGULATOR0STANDBY 0x1
+#define BREGULATORPLLSTANDBY 0x2
+#define BREGULATOR1STANDBY 0x4
+#define BPLLPOWERUP 0x8
+#define BDPLLPOWERUP 0x10
+#define BDA10POWERUP 0x20
+#define BAD7POWERUP 0x200
+#define BDA6POWERUP 0x2000
+#define BXTALPOWERUP 0x4000
+#define B40MDCLKPOWERUP 0x8000
+#define BDA6DEBUGMODE 0x20000
+#define BDA6SWING 0x380000
+
+#define BADCLKPHASE 0x4000000
+#define B80MCLKDELAY 0x18000000
+#define BAFEWATCHDOGENABLE 0x20000000
+
+#define BXTALCAP01 0xc0000000
+#define BXTALCAP23 0x3
+#define BXTALCAP92X 0x0f000000
+#define BXTALCAP 0x0f000000
+
+#define BINTDIFCLKENABLE 0x400
+#define BEXTSIGCLKENABLE 0x800
+#define BBANDGAP_MBIAS_POWERUP 0x10000
+#define BAD11SH_GAIN 0xc0000
+#define BAD11NPUT_RANGE 0x700000
+#define BAD110P_CURRENT 0x3800000
+#define BLPATH_LOOPBACK 0x4000000
+#define BQPATH_LOOPBACK 0x8000000
+#define BAFE_LOOPBACK 0x10000000
+#define BDA10_SWING 0x7e0
+#define BDA10_REVERSE 0x800
+#define BDA_CLK_SOURCE 0x1000
+#define BDA7INPUT_RANGE 0x6000
+#define BDA7_GAIN 0x38000
+#define BDA7OUTPUT_CM_MODE 0x40000
+#define BDA7INPUT_CM_MODE 0x380000
+#define BDA7CURRENT 0xc00000
+#define BREGULATOR_ADJUST 0x7000000
+#define BAD11POWERUP_ATTX 0x1
+#define BDA10PS_ATTX 0x10
+#define BAD11POWERUP_ATRX 0x100
+#define BDA10PS_ATRX 0x1000
+#define BCCKRX_AGC_FORMAT 0x200
+#define BPSDFFT_SAMPLE_POINT 0xc000
+#define BPSD_AVERAGE_NUM 0x3000
+#define BIQPATH_CONTROL 0xc00
+#define BPSD_FREQ 0x3ff
+#define BPSD_ANTENNA_PATH 0x30
+#define BPSD_IQ_SWITCH 0x40
+#define BPSD_RX_TRIGGER 0x400000
+#define BPSD_TX_TRIGGER 0x80000000
+#define BPSD_SINE_TONE_SCALE 0x7f000000
+#define BPSD_REPORT 0xffff
+
+#define BOFDM_TXSC 0x30000000
+#define BCCK_TXON 0x1
+#define BOFDM_TXON 0x2
+#define BDEBUG_PAGE 0xfff
+#define BDEBUG_ITEM 0xff
+#define BANTL 0x10
+#define BANT_NONHT 0x100
+#define BANT_HT1 0x1000
+#define BANT_HT2 0x10000
+#define BANT_HT1S1 0x100000
+#define BANT_NONHTS1 0x1000000
+
+#define BCCK_BBMODE 0x3
+#define BCCK_TXPOWERSAVING 0x80
+#define BCCK_RXPOWERSAVING 0x40
+
+#define BCCK_SIDEBAND 0x10
+
+#define BCCK_SCRAMBLE 0x8
+#define BCCK_ANTDIVERSITY 0x8000
+#define BCCK_CARRIER_RECOVERY 0x4000
+#define BCCK_TXRATE 0x3000
+#define BCCK_DCCANCEL 0x0800
+#define BCCK_ISICANCEL 0x0400
+#define BCCK_MATCH_FILTER 0x0200
+#define BCCK_EQUALIZER 0x0100
+#define BCCK_PREAMBLE_DETECT 0x800000
+#define BCCK_FAST_FALSECCA 0x400000
+#define BCCK_CH_ESTSTART 0x300000
+#define BCCK_CCA_COUNT 0x080000
+#define BCCK_CS_LIM 0x070000
+#define BCCK_BIST_MODE 0x80000000
+#define BCCK_CCAMASK 0x40000000
+#define BCCK_TX_DAC_PHASE 0x4
+#define BCCK_RX_ADC_PHASE 0x20000000
+#define BCCKR_CP_MODE 0x0100
+#define BCCK_TXDC_OFFSET 0xf0
+#define BCCK_RXDC_OFFSET 0xf
+#define BCCK_CCA_MODE 0xc000
+#define BCCK_FALSECS_LIM 0x3f00
+#define BCCK_CS_RATIO 0xc00000
+#define BCCK_CORGBIT_SEL 0x300000
+#define BCCK_PD_LIM 0x0f0000
+#define BCCK_NEWCCA 0x80000000
+#define BCCK_RXHP_OF_IG 0x8000
+#define BCCK_RXIG 0x7f00
+#define BCCK_LNA_POLARITY 0x800000
+#define BCCK_RX1ST_BAIN 0x7f0000
+#define BCCK_RF_EXTEND 0x20000000
+#define BCCK_RXAGC_SATLEVEL 0x1f000000
+#define BCCK_RXAGC_SATCOUNT 0xe0
+#define BCCKRXRFSETTLE 0x1f
+#define BCCK_FIXED_RXAGC 0x8000
+#define BCCK_ANTENNA_POLARITY 0x2000
+#define BCCK_TXFILTER_TYPE 0x0c00
+#define BCCK_RXAGC_REPORTTYPE 0x0300
+#define BCCK_RXDAGC_EN 0x80000000
+#define BCCK_RXDAGC_PERIOD 0x20000000
+#define BCCK_RXDAGC_SATLEVEL 0x1f000000
+#define BCCK_TIMING_RECOVERY 0x800000
+#define BCCK_TXC0 0x3f0000
+#define BCCK_TXC1 0x3f000000
+#define BCCK_TXC2 0x3f
+#define BCCK_TXC3 0x3f00
+#define BCCK_TXC4 0x3f0000
+#define BCCK_TXC5 0x3f000000
+#define BCCK_TXC6 0x3f
+#define BCCK_TXC7 0x3f00
+#define BCCK_DEBUGPORT 0xff0000
+#define BCCK_DAC_DEBUG 0x0f000000
+#define BCCK_FALSEALARM_ENABLE 0x8000
+#define BCCK_FALSEALARM_READ 0x4000
+#define BCCK_TRSSI 0x7f
+#define BCCK_RXAGC_REPORT 0xfe
+#define BCCK_RXREPORT_ANTSEL 0x80000000
+#define BCCK_RXREPORT_MFOFF 0x40000000
+#define BCCK_RXREPORT_SQLOSS 0x20000000
+#define BCCK_RXREPORT_PKTLOSS 0x10000000
+#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
+#define BCCK_RXREPORT_RATEERROR 0x04000000
+#define BCCK_RXREPORT_RXRATE 0x03000000
+#define BCCK_RXFA_COUNTER_LOWER 0xff
+#define BCCK_RXFA_COUNTER_UPPER 0xff000000
+#define BCCK_RXHPAGC_START 0xe000
+#define BCCK_RXHPAGC_FINAL 0x1c00
+#define BCCK_RXFALSEALARM_ENABLE 0x8000
+#define BCCK_FACOUNTER_FREEZE 0x4000
+#define BCCK_TXPATH_SEL 0x10000000
+#define BCCK_DEFAULT_RXPATH 0xc000000
+#define BCCK_OPTION_RXPATH 0x3000000
+
+#define BNUM_OFSTF 0x3
+#define BSHIFT_L 0xc0
+#define BGI_TH 0xc
+#define BRXPATH_A 0x1
+#define BRXPATH_B 0x2
+#define BRXPATH_C 0x4
+#define BRXPATH_D 0x8
+#define BTXPATH_A 0x1
+#define BTXPATH_B 0x2
+#define BTXPATH_C 0x4
+#define BTXPATH_D 0x8
+#define BTRSSI_FREQ 0x200
+#define BADC_BACKOFF 0x3000
+#define BDFIR_BACKOFF 0xc000
+#define BTRSSI_LATCH_PHASE 0x10000
+#define BRX_LDC_OFFSET 0xff
+#define BRX_QDC_OFFSET 0xff00
+#define BRX_DFIR_MODE 0x1800000
+#define BRX_DCNF_TYPE 0xe000000
+#define BRXIQIMB_A 0x3ff
+#define BRXIQIMB_B 0xfc00
+#define BRXIQIMB_C 0x3f0000
+#define BRXIQIMB_D 0xffc00000
+#define BDC_DC_NOTCH 0x60000
+#define BRXNB_NOTCH 0x1f000000
+#define BPD_TH 0xf
+#define BPD_TH_OPT2 0xc000
+#define BPWED_TH 0x700
+#define BIFMF_WIN_L 0x800
+#define BPD_OPTION 0x1000
+#define BMF_WIN_L 0xe000
+#define BBW_SEARCH_L 0x30000
+#define BWIN_ENH_L 0xc0000
+#define BBW_TH 0x700000
+#define BED_TH2 0x3800000
+#define BBW_OPTION 0x4000000
+#define BRADIO_TH 0x18000000
+#define BWINDOW_L 0xe0000000
+#define BSBD_OPTION 0x1
+#define BFRAME_TH 0x1c
+#define BFS_OPTION 0x60
+#define BDC_SLOPE_CHECK 0x80
+#define BFGUARD_COUNTER_DC_L 0xe00
+#define BFRAME_WEIGHT_SHORT 0x7000
+#define BSUB_TUNE 0xe00000
+#define BFRAME_DC_LENGTH 0xe000000
+#define BSBD_START_OFFSET 0x30000000
+#define BFRAME_TH_2 0x7
+#define BFRAME_GI2_TH 0x38
+#define BGI2_SYNC_EN 0x40
+#define BSARCH_SHORT_EARLY 0x300
+#define BSARCH_SHORT_LATE 0xc00
+#define BSARCH_GI2_LATE 0x70000
+#define BCFOANTSUM 0x1
+#define BCFOACC 0x2
+#define BCFOSTARTOFFSET 0xc
+#define BCFOLOOPBACK 0x70
+#define BCFOSUMWEIGHT 0x80
+#define BDAGCENABLE 0x10000
+#define BTXIQIMB_A 0x3ff
+#define BTXIQIMB_b 0xfc00
+#define BTXIQIMB_C 0x3f0000
+#define BTXIQIMB_D 0xffc00000
+#define BTXIDCOFFSET 0xff
+#define BTXIQDCOFFSET 0xff00
+#define BTXDFIRMODE 0x10000
+#define BTXPESUDO_NOISEON 0x4000000
+#define BTXPESUDO_NOISE_A 0xff
+#define BTXPESUDO_NOISE_B 0xff00
+#define BTXPESUDO_NOISE_C 0xff0000
+#define BTXPESUDO_NOISE_D 0xff000000
+#define BCCA_DROPOPTION 0x20000
+#define BCCA_DROPTHRES 0xfff00000
+#define BEDCCA_H 0xf
+#define BEDCCA_L 0xf0
+#define BLAMBDA_ED 0x300
+#define BRX_INITIALGAIN 0x7f
+#define BRX_ANTDIV_EN 0x80
+#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
+#define BRX_HIGHPOWER_FLOW 0x8000
+#define BRX_AGC_FREEZE_THRES 0xc0000
+#define BRX_FREEZESTEP_AGC1 0x300000
+#define BRX_FREEZESTEP_AGC2 0xc00000
+#define BRX_FREEZESTEP_AGC3 0x3000000
+#define BRX_FREEZESTEP_AGC0 0xc000000
+#define BRXRSSI_CMP_EN 0x10000000
+#define BRXQUICK_AGCEN 0x20000000
+#define BRXAGC_FREEZE_THRES_MODE 0x40000000
+#define BRX_OVERFLOW_CHECKTYPE 0x80000000
+#define BRX_AGCSHIFT 0x7f
+#define BTRSW_TRI_ONLY 0x80
+#define BPOWER_THRES 0x300
+#define BRXAGC_EN 0x1
+#define BRXAGC_TOGETHER_EN 0x2
+#define BRXAGC_MIN 0x4
+#define BRXHP_INI 0x7
+#define BRXHP_TRLNA 0x70
+#define BRXHP_RSSI 0x700
+#define BRXHP_BBP1 0x7000
+#define BRXHP_BBP2 0x70000
+#define BRXHP_BBP3 0x700000
+#define BRSSI_H 0x7f0000
+#define BRSSI_GEN 0x7f000000
+#define BRXSETTLE_TRSW 0x7
+#define BRXSETTLE_LNA 0x38
+#define BRXSETTLE_RSSI 0x1c0
+#define BRXSETTLE_BBP 0xe00
+#define BRXSETTLE_RXHP 0x7000
+#define BRXSETTLE_ANTSW_RSSI 0x38000
+#define BRXSETTLE_ANTSW 0xc0000
+#define BRXPROCESS_TIME_DAGC 0x300000
+#define BRXSETTLE_HSSI 0x400000
+#define BRXPROCESS_TIME_BBPPW 0x800000
+#define BRXANTENNA_POWER_SHIFT 0x3000000
+#define BRSSI_TABLE_SELECT 0xc000000
+#define BRXHP_FINAL 0x7000000
+#define BRXHPSETTLE_BBP 0x7
+#define BRXHTSETTLE_HSSI 0x8
+#define BRXHTSETTLE_RXHP 0x70
+#define BRXHTSETTLE_BBPPW 0x80
+#define BRXHTSETTLE_IDLE 0x300
+#define BRXHTSETTLE_RESERVED 0x1c00
+#define BRXHT_RXHP_EN 0x8000
+#define BRXAGC_FREEZE_THRES 0x30000
+#define BRXAGC_TOGETHEREN 0x40000
+#define BRXHTAGC_MIN 0x80000
+#define BRXHTAGC_EN 0x100000
+#define BRXHTDAGC_EN 0x200000
+#define BRXHT_RXHP_BBP 0x1c00000
+#define BRXHT_RXHP_FINAL 0xe0000000
+#define BRXPW_RADIO_TH 0x3
+#define BRXPW_RADIO_EN 0x4
+#define BRXMF_HOLD 0x3800
+#define BRXPD_DELAY_TH1 0x38
+#define BRXPD_DELAY_TH2 0x1c0
+#define BRXPD_DC_COUNT_MAX 0x600
+#define BRXPD_DELAY_TH 0x8000
+#define BRXPROCESS_DELAY 0xf0000
+#define BRXSEARCHRANGE_GI2_EARLY 0x700000
+#define BRXFRAME_FUARD_COUNTER_L 0x3800000
+#define BRXSGI_GUARD_L 0xc000000
+#define BRXSGI_SEARCH_L 0x30000000
+#define BRXSGI_TH 0xc0000000
+#define BDFSCNT0 0xff
+#define BDFSCNT1 0xff00
+#define BDFSFLAG 0xf0000
+#define BMF_WEIGHT_SUM 0x300000
+#define BMINIDX_TH 0x7f000000
+#define BDAFORMAT 0x40000
+#define BTXCH_EMU_ENABLE 0x01000000
+#define BTRSW_ISOLATION_A 0x7f
+#define BTRSW_ISOLATION_B 0x7f00
+#define BTRSW_ISOLATION_C 0x7f0000
+#define BTRSW_ISOLATION_D 0x7f000000
+#define BEXT_LNA_GAIN 0x7c00
+
+#define BSTBC_EN 0x4
+#define BANTENNA_MAPPING 0x10
+#define BNSS 0x20
+#define BCFO_ANTSUM_ID 0x200
+#define BPHY_COUNTER_RESET 0x8000000
+#define BCFO_REPORT_GET 0x4000000
+#define BOFDM_CONTINUE_TX 0x10000000
+#define BOFDM_SINGLE_CARRIER 0x20000000
+#define BOFDM_SINGLE_TONE 0x40000000
+#define BHT_DETECT 0x100
+#define BCFOEN 0x10000
+#define BCFOVALUE 0xfff00000
+#define BSIGTONE_RE 0x3f
+#define BSIGTONE_IM 0x7f00
+#define BCOUNTER_CCA 0xffff
+#define BCOUNTER_PARITYFAIL 0xffff0000
+#define BCOUNTER_RATEILLEGAL 0xffff
+#define BCOUNTER_CRC8FAIL 0xffff0000
+#define BCOUNTER_MCSNOSUPPORT 0xffff
+#define BCOUNTER_FASTSYNC 0xffff
+#define BSHORTCFO 0xfff
+#define BSHORTCFOT_LENGTH 12
+#define BSHORTCFOF_LENGTH 11
+#define BLONGCFO 0x7ff
+#define BLONGCFOT_LENGTH 11
+#define BLONGCFOF_LENGTH 11
+#define BTAILCFO 0x1fff
+#define BTAILCFOT_LENGTH 13
+#define BTAILCFOF_LENGTH 12
+#define BNOISE_EN_PWDB 0xffff
+#define BCC_POWER_DB 0xffff0000
+#define BMOISE_PWDB 0xffff
+#define BPOWERMEAST_LENGTH 10
+#define BPOWERMEASF_LENGTH 3
+#define BRX_HT_BW 0x1
+#define BRXSC 0x6
+#define BRX_HT 0x8
+#define BNB_INTF_DET_ON 0x1
+#define BINTF_WIN_LEN_CFG 0x30
+#define BNB_INTF_TH_CFG 0x1c0
+#define BRFGAIN 0x3f
+#define BTABLESEL 0x40
+#define BTRSW 0x80
+#define BRXSNR_A 0xff
+#define BRXSNR_B 0xff00
+#define BRXSNR_C 0xff0000
+#define BRXSNR_D 0xff000000
+#define BSNR_EVMT_LENGTH 8
+#define BSNR_EVMF_LENGTH 1
+#define BCSI1ST 0xff
+#define BCSI2ND 0xff00
+#define BRXEVM1ST 0xff0000
+#define BRXEVM2ND 0xff000000
+#define BSIGEVM 0xff
+#define BPWDB 0xff00
+#define BSGIEN 0x10000
+
+#define BSFACTOR_QMA1 0xf
+#define BSFACTOR_QMA2 0xf0
+#define BSFACTOR_QMA3 0xf00
+#define BSFACTOR_QMA4 0xf000
+#define BSFACTOR_QMA5 0xf0000
+#define BSFACTOR_QMA6 0xf0000
+#define BSFACTOR_QMA7 0xf00000
+#define BSFACTOR_QMA8 0xf000000
+#define BSFACTOR_QMA9 0xf0000000
+#define BCSI_SCHEME 0x100000
+
+#define BNOISE_LVL_TOP_SET 0x3
+#define BCHSMOOTH 0x4
+#define BCHSMOOTH_CFG1 0x38
+#define BCHSMOOTH_CFG2 0x1c0
+#define BCHSMOOTH_CFG3 0xe00
+#define BCHSMOOTH_CFG4 0x7000
+#define BMRCMODE 0x800000
+#define BTHEVMCFG 0x7000000
+
+#define BLOOP_FIT_TYPE 0x1
+#define BUPD_CFO 0x40
+#define BUPD_CFO_OFFDATA 0x80
+#define BADV_UPD_CFO 0x100
+#define BADV_TIME_CTRL 0x800
+#define BUPD_CLKO 0x1000
+#define BFC 0x6000
+#define BTRACKING_MODE 0x8000
+#define BPHCMP_ENABLE 0x10000
+#define BUPD_CLKO_LTF 0x20000
+#define BCOM_CH_CFO 0x40000
+#define BCSI_ESTI_MODE 0x80000
+#define BADV_UPD_EQZ 0x100000
+#define BUCHCFG 0x7000000
+#define BUPDEQZ 0x8000000
+
+#define BRX_PESUDO_NOISE_ON 0x20000000
+#define BRX_PESUDO_NOISE_A 0xff
+#define BRX_PESUDO_NOISE_B 0xff00
+#define BRX_PESUDO_NOISE_C 0xff0000
+#define BRX_PESUDO_NOISE_D 0xff000000
+#define BRX_PESUDO_NOISESTATE_A 0xffff
+#define BRX_PESUDO_NOISESTATE_B 0xffff0000
+#define BRX_PESUDO_NOISESTATE_C 0xffff
+#define BRX_PESUDO_NOISESTATE_D 0xffff0000
+
+#define BZEBRA1_HSSIENABLE 0x8
+#define BZEBRA1_TRXCONTROL 0xc00
+#define BZEBRA1_TRXGAINSETTING 0x07f
+#define BZEBRA1_RXCOUNTER 0xc00
+#define BZEBRA1_TXCHANGEPUMP 0x38
+#define BZEBRA1_RXCHANGEPUMP 0x7
+#define BZEBRA1_CHANNEL_NUM 0xf80
+#define BZEBRA1_TXLPFBW 0x400
+#define BZEBRA1_RXLPFBW 0x600
+
+#define BRTL8256REG_MODE_CTRL1 0x100
+#define BRTL8256REG_MODE_CTRL0 0x40
+#define BRTL8256REG_TXLPFBW 0x18
+#define BRTL8256REG_RXLPFBW 0x600
+
+#define BRTL8258_TXLPFBW 0xc
+#define BRTL8258_RXLPFBW 0xc00
+#define BRTL8258_RSSILPFBW 0xc0
+
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#define REG_UN_used_register 0x01bf
+
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0)
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1)
+#define HAL92C_WOL_DISASSOC_EVENT BIT(2)
+#define HAL92C_WOL_DEAUTH_EVENT BIT(3)
+#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4)
+
+#define WOL_REASON_PTK_UPDATE BIT(0)
+#define WOL_REASON_GTK_UPDATE BIT(1)
+#define WOL_REASON_DISASSOC BIT(2)
+#define WOL_REASON_DEAUTH BIT(3)
+#define WOL_REASON_FW_DISCONNECT BIT(4)
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+
+#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/
+#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
new file mode 100644
index 000000000000..486294930a7b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
@@ -0,0 +1,504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
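+ /* Bits 10-11 of RF_CHNLBW select the bandwidth: the mask 0xfffff3ff
+  * clears them, then both bits are set for 20 MHz and only BIT(10)
+  * for 20/40 MHz operation.
+  */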
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | BIT(10) | BIT(11));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | BIT(10));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ "unknown bandwidth: %#X\n", bandwidth);
+ break;
+ }
+}
+
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+
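+ /* While scanning, force the CCK TX AGC of both paths to maximum unless
+  * the EEPROM regulatory setting disables turbo scan, in which case the
+  * configured power levels are replicated into all four bytes instead.
+  */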
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval =
+ (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] <<
+ 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
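+ /* Clamp every per-rate byte of the TX AGC words to RF6052_MAX_TX_PWR. */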
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *)(&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
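+ /* Apply the thermal power-tracking delta: direction 1 raises the
+  * level, direction 2 lowers it.
+  */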
+ rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+ if (direction == 1) {
+ tx_agc[0] += pwrtrac_value;
+ tx_agc[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ tx_agc[0] -= pwrtrac_value;
+ tx_agc[1] -= pwrtrac_value;
+ }
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32);
+
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11);
+
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11);
+
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32);
+}
+
+static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel, u32 *ofdmbase,
+ u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 powerbase0, powerbase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerbase0 = ppowerlevel_ofdm[i];
+
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ powerlevel[i] = ppowerlevel_bw20[i];
+ else
+ powerlevel[i] = ppowerlevel_bw40[i];
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
+
+ *(mcsbase + i) = powerbase1;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
+ }
+}
+
+static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index,
+ u32 *powerbase0, u32 *powerbase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4];
+ u8 pwr_diff = 0, customer_pwr_diff;
+ u32 writeval, customer_limit, rf;
+
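+ /* eeprom_regulatory selects the TX power scheme per RF path: 0 and the
+  * default case use the Realtek MCS offsets, 1 applies per-channel-group
+  * offsets, 2 writes the power base only, 3 honours the customer
+  * (pwrgroup) limits.
+  */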
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+
+ writeval =
+ rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RTK better performance, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1) {
+ chnlgroup = 0;
+ } else {
+ if (channel < 3)
+ chnlgroup = 0;
+ else if (channel < 6)
+ chnlgroup = 1;
+ else if (channel < 9)
+ chnlgroup = 2;
+ else if (channel < 12)
+ chnlgroup = 3;
+ else if (channel < 14)
+ chnlgroup = 4;
+ else if (channel == 14)
+ chnlgroup = 5;
+ }
+ writeval = rtlphy->mcs_offset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Realtek regulatory, 20MHz, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+
+ break;
+ case 2:
+ writeval =
+ ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Better regulatory, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ case 3:
+ chnlgroup = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "customer's limit, 40MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel-1]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "customer's limit, 20MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel-1]);
+ }
+
+ if (index < 2)
+ pwr_diff =
+ rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+ else if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ pwr_diff =
+ rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ customer_pwr_diff =
+ rtlefuse->pwrgroup_ht40[rf][channel-1];
+ else
+ customer_pwr_diff =
+ rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+ if (pwr_diff > customer_pwr_diff)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_diff - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8)((rtlphy->mcs_offset
+ [chnlgroup][index + (rf ? 8 : 0)] &
+ (0x7f << (i * 8))) >> (i * 8));
+
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
+ (pwr_diff_limit[0]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit);
+
+ writeval = customer_limit + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Customer, writeval rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ default:
+ chnlgroup = 0;
+ writeval =
+ rtlphy->mcs_offset[chnlgroup]
+ [index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RTK better performance, writeval "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval);
+ break;
+ }
+
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeval = writeval - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeval = writeval - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeval;
+ }
+}
+
+static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeval;
+ u16 regoffset;
+
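+ /* Split each 32-bit TX AGC word into four per-rate bytes, clamp them to
+  * RF6052_MAX_TX_PWR and write the result to the per-path AGC register.
+  */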
+ for (rf = 0; rf < 2; rf++) {
+ writeval = value[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeval & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Set 0x%x = %08x\n", regoffset, writeval);
+ }
+}
+
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40, u8 channel)
+{
+ u32 writeval[2], powerbase0[2], powerbase1[2];
+ u8 index;
+ u8 direction;
+ u32 pwrtrac_value;
+
+ rtl8723be_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20,
+ ppowerlevel_bw40, channel,
+ &powerbase0[0], &powerbase1[0]);
+
+ rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
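+ /* Six register groups cover the legacy OFDM rates (6-18 and 24-54 Mbps)
+  * and MCS0-15; each iteration computes and writes one group per path.
+  */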
+ for (index = 0; index < 6; index++) {
+ txpwr_by_regulatory(hw, channel, index, &powerbase0[0],
+ &powerbase1[0], &writeval[0]);
+ if (direction == 1) {
+ writeval[0] += pwrtrac_value;
+ writeval[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ writeval[0] -= pwrtrac_value;
+ writeval[1] -= pwrtrac_value;
+ }
+ _rtl8723be_write_ofdm_power_reg(hw, index, &writeval[0]);
+ }
+}
+
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ return _rtl8723be_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg;
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+ bool rtstatus = true;
+
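+ /* For each RF path: save the RF_ENV setting, switch the path to the
+  * 3-wire programming interface, load the RF registers from the header
+  * file, then restore the saved setting.
+  */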
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV, u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16, u4_regvalue);
+ break;
+ }
+
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!", rfpath);
+ return false;
+ }
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
new file mode 100644
index 000000000000..a6fea106ced4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_RF_H__
+#define __RTL8723BE_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel_ofdm,
+ u8 *ppowerlevel_bw20,
+ u8 *ppowerlevel_bw40,
+ u8 channel);
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
new file mode 100644
index 000000000000..b4577ebc4bb0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "dm.h"
+#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "../btcoexist/rtl_btc.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ /* Disable ASPM for AMD by default */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /* ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+ * set default to RTL8192CE:3 RTL8192E:2
+ */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /* In Hw/Sw Radio Off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+ * set default to RTL8192CE:0 RTL8192SE:2
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /* This setting works for devices with a backdoor
+ * ASPM setting, such as an EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+}
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtl8723be_bt_reg_init(hw);
+ rtlpci->msi_support = true;
+ rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+ mac->ht_enable = true;
+
+ /* Only the 2.4G band is supported, single MAC single PHY (smsp) */
+ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+ rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
+ rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+ rtlpci->receive_config = (RCR_APPFCS |
+ RCR_APP_MIC |
+ RCR_APP_ICV |
+ RCR_APP_PHYST_RXFF |
+ RCR_HTC_LOC_CTRL |
+ RCR_AMF |
+ RCR_ACF |
+ RCR_ADF |
+ RCR_AICV |
+ RCR_AB |
+ RCR_AM |
+ RCR_APM |
+ 0);
+
+ rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT |
+ IMR_HSISR_IND_ON_INT |
+ IMR_C2HCMD |
+ IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_RDU |
+ IMR_ROK |
+ 0);
+
+ rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
+
+ /* for debug level */
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+ /* for LPS & IPS */
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpriv->psc.reg_fwctrl_lps = 3;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ /* ASPM can be disabled by setting
+ * const_support_pciaspm = 0
+ */
+ rtl8723be_init_aspm_vars(hw);
+
+ if (rtlpriv->psc.reg_fwctrl_lps == 1)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 3)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Can't alloc buffer for fw.\n");
+ return 1;
+ }
+
+ rtlpriv->max_fw_size = 0x8000;
+ pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Failed to request firmware!\n");
+ return 1;
+ }
+ return 0;
+}
+
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->cfg->ops->get_btc_status())
+ rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+}
+
+/* get bt coexist status */
+bool rtl8723be_get_btc_status(void)
+{
+ return true;
+}
+
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+ return (hdr->signature & 0xfff0) == 0x5300;
+}
+
+static struct rtl_hal_ops rtl8723be_hal_ops = {
+ .init_sw_vars = rtl8723be_init_sw_vars,
+ .deinit_sw_vars = rtl8723be_deinit_sw_vars,
+ .read_eeprom_info = rtl8723be_read_eeprom_info,
+ .interrupt_recognized = rtl8723be_interrupt_recognized,
+ .hw_init = rtl8723be_hw_init,
+ .hw_disable = rtl8723be_card_disable,
+ .hw_suspend = rtl8723be_suspend,
+ .hw_resume = rtl8723be_resume,
+ .enable_interrupt = rtl8723be_enable_interrupt,
+ .disable_interrupt = rtl8723be_disable_interrupt,
+ .set_network_type = rtl8723be_set_network_type,
+ .set_chk_bssid = rtl8723be_set_check_bssid,
+ .set_qos = rtl8723be_set_qos,
+ .set_bcn_reg = rtl8723be_set_beacon_related_registers,
+ .set_bcn_intv = rtl8723be_set_beacon_interval,
+ .update_interrupt_mask = rtl8723be_update_interrupt_mask,
+ .get_hw_reg = rtl8723be_get_hw_reg,
+ .set_hw_reg = rtl8723be_set_hw_reg,
+ .update_rate_tbl = rtl8723be_update_hal_rate_tbl,
+ .fill_tx_desc = rtl8723be_tx_fill_desc,
+ .fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc,
+ .query_rx_desc = rtl8723be_rx_query_desc,
+ .set_channel_access = rtl8723be_update_channel_access_setting,
+ .radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl8723be_phy_set_bw_mode,
+ .switch_channel = rtl8723be_phy_sw_chnl,
+ .dm_watchdog = rtl8723be_dm_watchdog,
+ .scan_operation_backup = rtl8723be_phy_scan_operation_backup,
+ .set_rf_power_state = rtl8723be_phy_set_rf_power_state,
+ .led_control = rtl8723be_led_control,
+ .set_desc = rtl8723be_set_desc,
+ .get_desc = rtl8723be_get_desc,
+ .is_tx_desc_closed = rtl8723be_is_tx_desc_closed,
+ .tx_polling = rtl8723be_tx_polling,
+ .enable_hw_sec = rtl8723be_enable_hw_security_config,
+ .set_key = rtl8723be_set_key,
+ .init_sw_leds = rtl8723be_init_sw_leds,
+ .get_bbreg = rtl8723_phy_query_bb_reg,
+ .set_bbreg = rtl8723_phy_set_bb_reg,
+ .get_rfreg = rtl8723be_phy_query_rf_reg,
+ .set_rfreg = rtl8723be_phy_set_rf_reg,
+ .fill_h2c_cmd = rtl8723be_fill_h2c_cmd,
+ .get_btc_status = rtl8723be_get_btc_status,
+ .is_fw_header = is_fw_header,
+};
+
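+/* default module parameters; overridable via the module_param_named() entries below */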
+static struct rtl_mod_params rtl8723be_mod_params = {
+ .sw_crypto = false,
+ .inactiveps = true,
+ .swctrl_lps = false,
+ .fwctrl_lps = true,
+ .debug = DBG_EMERG,
+};
+
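+/* static per-chip configuration: firmware file name and register/bit maps */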
+static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8723be_pci",
+ .fw_name = "rtlwifi/rtl8723befw.bin",
+ .ops = &rtl8723be_hal_ops,
+ .mod_params = &rtl8723be_mod_params,
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+ .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
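+/* PCI ID 10ec:b723 binds this driver to the RTL8723BE */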
+static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id);
+
+MODULE_AUTHOR("PageHe <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
+
+module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0 [hardware crypto])\n");
+MODULE_PARM_DESC(ips, "Set to 0 to disable inactive power save (default 1 [enabled])\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1 [enabled])\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+
+static struct pci_driver rtl8723be_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl8723be_pci_id,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+ .driver.pm = &rtlwifi_pm_ops,
+};
+
+module_pci_driver(rtl8723be_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
new file mode 100644
index 000000000000..a7b25e769950
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_SW_H__
+#define __RTL8723BE_SW_H__
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_init_var_map(struct ieee80211_hw *hw);
+bool rtl8723be_get_btc_status(void);
+
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
new file mode 100644
index 000000000000..4b283cde042e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
@@ -0,0 +1,572 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+u32 RTL8723BEPHY_REG_1TARRAY[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xB0000C1C,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0x948, 0x00000000,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x21806490,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC44,
+ 0xC34, 0x469652AF,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00023169,
+ 0xC5C, 0x00250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00020E1A,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00020E1A,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00000740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B2556,
+ 0xE6C, 0x00C00096,
+ 0xE70, 0x00C00096,
+ 0xE74, 0x01000056,
+ 0xE78, 0x01000014,
+ 0xE7C, 0x01000056,
+ 0xE80, 0x01000014,
+ 0xE84, 0x00C00096,
+ 0xE88, 0x01000056,
+ 0xE8C, 0x00C00096,
+ 0xED0, 0x00C00096,
+ 0xED4, 0x00C00096,
+ 0xED8, 0x00C00096,
+ 0xEDC, 0x000000D6,
+ 0xEE0, 0x000000D6,
+ 0xEEC, 0x01C00016,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+ 0x820, 0x01000100,
+ 0x800, 0x83040000,
+};
+
+u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
+ 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000,
+ 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800,
+ 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646,
+ 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840,
+ 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
+ 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
+};
+
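+/* RF (radio path A) register initialization: {register offset, value} pairs */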
+u32 RTL8723BE_RADIOA_1TARRAY[] = {
+ 0x000, 0x00010000,
+ 0x0B0, 0x000DFFE0,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B1, 0x00000018,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B2, 0x00084C00,
+ 0x0B5, 0x0000D2CC,
+ 0x0B6, 0x000925AA,
+ 0x0B7, 0x00000010,
+ 0x0B8, 0x0000907F,
+ 0x05C, 0x00000002,
+ 0x07C, 0x00000002,
+ 0x07E, 0x00000005,
+ 0x08B, 0x0006FC00,
+ 0x0B0, 0x000FF9F0,
+ 0x01C, 0x000739D2,
+ 0x01E, 0x00000000,
+ 0x0DF, 0x00000780,
+ 0x050, 0x00067435,
+ 0x051, 0x0006B04E,
+ 0x052, 0x000007D2,
+ 0x053, 0x00000000,
+ 0x054, 0x00050400,
+ 0x055, 0x0004026E,
+ 0x0DD, 0x0000004C,
+ 0x070, 0x00067435,
+ 0x071, 0x0006B04E,
+ 0x072, 0x000007D2,
+ 0x073, 0x00000000,
+ 0x074, 0x00050400,
+ 0x075, 0x0004026E,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADD7,
+ 0x035, 0x00005C00,
+ 0x034, 0x00009DD4,
+ 0x035, 0x00005000,
+ 0x034, 0x00008DD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00007DCE,
+ 0x035, 0x00003800,
+ 0x034, 0x00006CD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00005CCE,
+ 0x035, 0x00003800,
+ 0x034, 0x000048CE,
+ 0x035, 0x00004400,
+ 0x034, 0x000034CE,
+ 0x035, 0x00003800,
+ 0x034, 0x00002451,
+ 0x035, 0x00004400,
+ 0x034, 0x0000144E,
+ 0x035, 0x00003800,
+ 0x034, 0x00000051,
+ 0x035, 0x00004400,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x0ED, 0x00000010,
+ 0x044, 0x0000ADD7,
+ 0x044, 0x00009DD4,
+ 0x044, 0x00008DD1,
+ 0x044, 0x00007DCE,
+ 0x044, 0x00006CC1,
+ 0x044, 0x00005CCE,
+ 0x044, 0x000044D1,
+ 0x044, 0x000034CE,
+ 0x044, 0x00002451,
+ 0x044, 0x0000144E,
+ 0x044, 0x00000051,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000000,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x000380EF,
+ 0x03B, 0x000302FE,
+ 0x03B, 0x00028CE6,
+ 0x03B, 0x000200BC,
+ 0x03B, 0x000188A5,
+ 0x03B, 0x00010FBC,
+ 0x03B, 0x00008F71,
+ 0x03B, 0x00000900,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000001,
+ 0x040, 0x000380EF,
+ 0x040, 0x000302FE,
+ 0x040, 0x00028CE6,
+ 0x040, 0x000200BC,
+ 0x040, 0x000188A5,
+ 0x040, 0x00010FBC,
+ 0x040, 0x00008F71,
+ 0x040, 0x00000900,
+ 0x0ED, 0x00000000,
+ 0x082, 0x00080000,
+ 0x083, 0x00008000,
+ 0x084, 0x00048D80,
+ 0x085, 0x00068000,
+ 0x0A2, 0x00080000,
+ 0x0A3, 0x00008000,
+ 0x0A4, 0x00048D80,
+ 0x0A5, 0x00068000,
+ 0x000, 0x00033D80,
+};
+
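+/* MAC register initialization: {register offset, value} pairs */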
+u32 RTL8723BEMAC_1T_ARRAY[] = {
+ 0x02F, 0x00000030,
+ 0x035, 0x00000000,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+};
+
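+/* AGC table: successive gain-code values written to BB register 0xC78 (0xC50 at the end) */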
+u32 RTL8723BEAGCTAB_1TARRAY[] = {
+ 0xC78, 0xFD000001,
+ 0xC78, 0xFC010001,
+ 0xC78, 0xFB020001,
+ 0xC78, 0xFA030001,
+ 0xC78, 0xF9040001,
+ 0xC78, 0xF8050001,
+ 0xC78, 0xF7060001,
+ 0xC78, 0xF6070001,
+ 0xC78, 0xF5080001,
+ 0xC78, 0xF4090001,
+ 0xC78, 0xF30A0001,
+ 0xC78, 0xF20B0001,
+ 0xC78, 0xF10C0001,
+ 0xC78, 0xF00D0001,
+ 0xC78, 0xEF0E0001,
+ 0xC78, 0xEE0F0001,
+ 0xC78, 0xED100001,
+ 0xC78, 0xEC110001,
+ 0xC78, 0xEB120001,
+ 0xC78, 0xEA130001,
+ 0xC78, 0xE9140001,
+ 0xC78, 0xE8150001,
+ 0xC78, 0xE7160001,
+ 0xC78, 0xAA170001,
+ 0xC78, 0xA9180001,
+ 0xC78, 0xA8190001,
+ 0xC78, 0xA71A0001,
+ 0xC78, 0xA61B0001,
+ 0xC78, 0xA51C0001,
+ 0xC78, 0xA41D0001,
+ 0xC78, 0xA31E0001,
+ 0xC78, 0x671F0001,
+ 0xC78, 0x66200001,
+ 0xC78, 0x65210001,
+ 0xC78, 0x64220001,
+ 0xC78, 0x63230001,
+ 0xC78, 0x62240001,
+ 0xC78, 0x61250001,
+ 0xC78, 0x47260001,
+ 0xC78, 0x46270001,
+ 0xC78, 0x45280001,
+ 0xC78, 0x44290001,
+ 0xC78, 0x432A0001,
+ 0xC78, 0x422B0001,
+ 0xC78, 0x292C0001,
+ 0xC78, 0x282D0001,
+ 0xC78, 0x272E0001,
+ 0xC78, 0x262F0001,
+ 0xC78, 0x25300001,
+ 0xC78, 0x24310001,
+ 0xC78, 0x09320001,
+ 0xC78, 0x08330001,
+ 0xC78, 0x07340001,
+ 0xC78, 0x06350001,
+ 0xC78, 0x05360001,
+ 0xC78, 0x04370001,
+ 0xC78, 0x03380001,
+ 0xC78, 0x02390001,
+ 0xC78, 0x013A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0xFC400001,
+ 0xC78, 0xFB410001,
+ 0xC78, 0xFA420001,
+ 0xC78, 0xF9430001,
+ 0xC78, 0xF8440001,
+ 0xC78, 0xF7450001,
+ 0xC78, 0xF6460001,
+ 0xC78, 0xF5470001,
+ 0xC78, 0xF4480001,
+ 0xC78, 0xF3490001,
+ 0xC78, 0xF24A0001,
+ 0xC78, 0xF14B0001,
+ 0xC78, 0xF04C0001,
+ 0xC78, 0xEF4D0001,
+ 0xC78, 0xEE4E0001,
+ 0xC78, 0xED4F0001,
+ 0xC78, 0xEC500001,
+ 0xC78, 0xEB510001,
+ 0xC78, 0xEA520001,
+ 0xC78, 0xE9530001,
+ 0xC78, 0xE8540001,
+ 0xC78, 0xE7550001,
+ 0xC78, 0xE6560001,
+ 0xC78, 0xE5570001,
+ 0xC78, 0xAA580001,
+ 0xC78, 0xA9590001,
+ 0xC78, 0xA85A0001,
+ 0xC78, 0xA75B0001,
+ 0xC78, 0xA65C0001,
+ 0xC78, 0xA55D0001,
+ 0xC78, 0xA45E0001,
+ 0xC78, 0x675F0001,
+ 0xC78, 0x66600001,
+ 0xC78, 0x65610001,
+ 0xC78, 0x64620001,
+ 0xC78, 0x63630001,
+ 0xC78, 0x62640001,
+ 0xC78, 0x61650001,
+ 0xC78, 0x47660001,
+ 0xC78, 0x46670001,
+ 0xC78, 0x45680001,
+ 0xC78, 0x44690001,
+ 0xC78, 0x436A0001,
+ 0xC78, 0x426B0001,
+ 0xC78, 0x296C0001,
+ 0xC78, 0x286D0001,
+ 0xC78, 0x276E0001,
+ 0xC78, 0x266F0001,
+ 0xC78, 0x25700001,
+ 0xC78, 0x24710001,
+ 0xC78, 0x09720001,
+ 0xC78, 0x08730001,
+ 0xC78, 0x07740001,
+ 0xC78, 0x06750001,
+ 0xC78, 0x05760001,
+ 0xC78, 0x04770001,
+ 0xC78, 0x03780001,
+ 0xC78, 0x02790001,
+ 0xC78, 0x017A0001,
+ 0xC78, 0x007B0001,
+ 0xC78, 0x007C0001,
+ 0xC78, 0x007D0001,
+ 0xC78, 0x007E0001,
+ 0xC78, 0x007F0001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
new file mode 100644
index 000000000000..932760a84827
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TABLE__H_
+#define __RTL8723BE_TABLE__H_
+
+#include <linux/types.h>
+#define RTL8723BEPHY_REG_1TARRAYLEN 388
+extern u32 RTL8723BEPHY_REG_1TARRAY[];
+#define RTL8723BEPHY_REG_ARRAY_PGLEN 36
+extern u32 RTL8723BEPHY_REG_ARRAY_PG[];
+#define RTL8723BE_RADIOA_1TARRAYLEN 206
+extern u32 RTL8723BE_RADIOA_1TARRAY[];
+#define RTL8723BEMAC_1T_ARRAYLEN 194
+extern u32 RTL8723BEMAC_1T_ARRAY[];
+#define RTL8723BEAGCTAB_1TARRAYLEN 260
+extern u32 RTL8723BEAGCTAB_1TARRAY[];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
new file mode 100644
index 000000000000..e0a0d8c8fed5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -0,0 +1,960 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+#include "phy.h"
+
+static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+ __le16 fc = rtl_get_fc(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return QSLT_BEACON;
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+ return QSLT_MGNT;
+
+ return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
+ bool isht, u8 desc_rate)
+{
+ int rate_idx;
+
+ if (!isht) {
+ if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC92C_RATE6M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 7;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ } else {
+ switch (desc_rate) {
+ case DESC92C_RATEMCS0:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATEMCS1:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATEMCS2:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATEMCS3:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATEMCS4:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATEMCS5:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATEMCS6:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATEMCS7:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATEMCS8:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATEMCS9:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATEMCS10:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATEMCS11:
+ rate_idx = 11;
+ break;
+ case DESC92C_RATEMCS12:
+ rate_idx = 12;
+ break;
+ case DESC92C_RATEMCS13:
+ rate_idx = 13;
+ break;
+ case DESC92C_RATEMCS14:
+ rate_idx = 14;
+ break;
+ case DESC92C_RATEMCS15:
+ rate_idx = 15;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
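+/* parse the PHY status report appended by the hardware to fill RSSI, PWDB and EVM stats */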
+static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstatus, u8 *pdesc,
+ struct rx_fwinfo_8723be *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8723e_t *cck_buf;
+ struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
+ struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+ char rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck = pstatus->is_cck;
+ u8 lan_idx, vga_idx;
+
+ /* Record it for next packet processing */
+ pstatus->packet_matchbssid = packet_match_bssid;
+ pstatus->packet_toself = packet_toself;
+ pstatus->packet_beacon = packet_beacon;
+ pstatus->rx_mimo_sig_qual[0] = -1;
+ pstatus->rx_mimo_sig_qual[1] = -1;
+
+ if (is_cck) {
+ u8 cck_highpwr;
+ u8 cck_agc_rpt;
+ /* CCK Driver info Structure is not the same as OFDM packet. */
+ cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
+ cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ /* (1)Hardware does not provide RSSI for CCK
+		 * (2)PWDB, Average PWDB calculated by
+ * hardware (for rate adaptive)
+ */
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ else
+ cck_highpwr = false;
+
+ lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ vga_idx = (cck_agc_rpt & 0x1f);
+ switch (lan_idx) {
+ case 7:
+ if (vga_idx <= 27)/*VGA_idx = 27~2*/
+ rx_pwr_all = -100 + 2 * (27 - vga_idx);
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:/*VGA_idx = 2~0*/
+ rx_pwr_all = -48 + 2 * (2 - vga_idx);
+ break;
+ case 5:/*VGA_idx = 7~5*/
+ rx_pwr_all = -42 + 2 * (7 - vga_idx);
+ break;
+ case 4:/*VGA_idx = 7~4*/
+ rx_pwr_all = -36 + 2 * (7 - vga_idx);
+ break;
+ case 3:/*VGA_idx = 7~0*/
+ rx_pwr_all = -24 + 2 * (7 - vga_idx);
+ break;
+ case 2:
+ if (cck_highpwr)/*VGA_idx = 5~0*/
+ rx_pwr_all = -12 + 2 * (5 - vga_idx);
+ else
+ rx_pwr_all = -6 + 2 * (5 - vga_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8 - 2 * vga_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14 - 2 * vga_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+		/* CCK gain is smaller than OFDM/MCS gain, so add a
+		 * gain offset of 6, determined by experiment
+		 */
+ pwdb_all += 6;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+		/* modify the offset to match the gain index of OFDM */
+ if (pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if (pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if (pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if (pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ if (!cck_highpwr) {
+ if (pwdb_all >= 80)
+ pwdb_all = ((pwdb_all - 80) << 1) +
+ ((pwdb_all - 80) >> 1) + 80;
+ else if ((pwdb_all <= 78) && (pwdb_all >= 20))
+ pwdb_all += 3;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+ }
+
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3) Get Signal Quality (EVM) */
+ if (packet_match_bssid) {
+ u8 sq;
+
+ if (pstatus->rx_pwdb_all > 40) {
+ sq = 100;
+ } else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+
+ pstatus->signalquality = sq;
+ pstatus->rx_mimo_sig_qual[0] = sq;
+ pstatus->rx_mimo_sig_qual[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+
+ /* (1)Get RSSI for HT rate */
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+			/* count the enabled RF RX paths */
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;
+
+ /* Translate DBM to percentage. */
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+
+ /* Get Rx snr value in DB */
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+
+ /* Record Signal Strength for next packet */
+ if (packet_match_bssid)
+ pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+
+		/* (2)PWDB, Avg calculated by hardware (for rate adaptive) */
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstatus->rx_pwdb_all = pwdb_all;
+ pstatus->rxpower = rx_pwr_all;
+ pstatus->recvsignalpower = rx_pwr_all;
+
+ /* (3)EVM of HT rate */
+ if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 &&
+ pstatus->rate <= DESC92C_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+ if (packet_match_bssid) {
+ /* Fill value in RFD, Get the first
+ * spatial stream only
+ */
+ if (i == 0)
+ pstatus->signalquality =
+ (u8) (evm & 0xff);
+ pstatus->rx_mimo_sig_qual[i] =
+ (u8) (evm & 0xff);
+ }
+ }
+ if (packet_match_bssid) {
+ for (i = RF90_PATH_A; i <= RF90_PATH_B; i++)
+ rtl_priv(hw)->dm.cfo_tail[i] =
+ (char)p_phystrpt->path_cfotail[i];
+
+ rtl_priv(hw)->dm.packet_count++;
+ if (rtl_priv(hw)->dm.packet_count == 0xffffffff)
+ rtl_priv(hw)->dm.packet_count = 0;
+ }
+ }
+
+	/* UI BSS list signal strength (in percentage),
+	 * scaled to look reasonable, from 0~100
+	 */
+ if (is_cck)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+ /*HW antenna diversity*/
+ rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel;
+ rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b;
+ rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2;
+}
+
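+/* classify the received frame (match-BSSID / to-self / beacon) and run the PHY status parser */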
+static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstatus,
+ u8 *pdesc,
+ struct rx_fwinfo_8723be *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ u16 fc, type;
+ bool packet_matchbssid, packet_toself, packet_beacon;
+
+ tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = le16_to_cpu(hdr->frame_control);
+ type = WLAN_FC_GET_TYPE(hdr->frame_control);
+ praddr = hdr->addr1;
+ psaddr = ieee80211_get_SA(hdr);
+ memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+ packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+ (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) &&
+ (!pstatus->hwerror) &&
+ (!pstatus->crc) && (!pstatus->icv));
+
+ packet_toself = packet_matchbssid &&
+ (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+	/* YP: packet_beacon is not initialized;
+	 * this assignment is necessary, otherwise it
+	 * could spuriously be true here (the situation
+	 * is much worse in kernel 3.10)
+	 */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ packet_beacon = true;
+ else
+ packet_beacon = false;
+
+ if (packet_beacon && packet_matchbssid)
+ rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+ _rtl8723be_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+ packet_matchbssid,
+ packet_toself,
+ packet_beacon);
+
+ rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
+
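+/* build the 8-byte early-mode header holding the padded lengths of the queued packets */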
+static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ u8 *virtualaddress)
+{
+ u32 dwtmp = 0;
+ memset(virtualaddress, 0, 8);
+
+ SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ if (ptcb_desc->empkt_num == 1) {
+ dwtmp = ptcb_desc->empkt_len[0];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[0];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[1];
+ }
+ SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+ if (ptcb_desc->empkt_num <= 3) {
+ dwtmp = ptcb_desc->empkt_len[2];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[2];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[3];
+ }
+ SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 5) {
+ dwtmp = ptcb_desc->empkt_len[4];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[4];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[5];
+ }
+ SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+ SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ if (ptcb_desc->empkt_num <= 7) {
+ dwtmp = ptcb_desc->empkt_len[6];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[6];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[7];
+ }
+ SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ if (ptcb_desc->empkt_num <= 9) {
+ dwtmp = ptcb_desc->empkt_len[8];
+ } else {
+ dwtmp = ptcb_desc->empkt_len[8];
+ dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+ dwtmp += ptcb_desc->empkt_len[9];
+ }
+ SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
+
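+/* translate an RX descriptor into rtl_stats / ieee80211_rx_status for mac80211 */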
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rx_fwinfo_8723be *p_drvinfo;
+ struct ieee80211_hdr *hdr;
+
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
+ if (status->packet_report_type == TX_REPORT2)
+ status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc);
+ else
+ status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ status->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ status->hwerror = (status->crc | status->icv);
+ status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ if (status->packet_report_type == NORMAL_RX)
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+ status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
+
+ status->macid = GET_RX_DESC_MACID(pdesc);
+	if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->wake_match = BIT(2);
+ else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ status->wake_match = BIT(1);
+ else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ status->wake_match = BIT(0);
+ else
+ status->wake_match = 0;
+ if (status->wake_match)
+ RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
+			 "Get Wakeup Packet!! WakeMatch=%d\n",
+ status->wake_match);
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+
+ hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
+ status->rx_bufshift);
+
+ if (status->crc)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (status->rx_is40Mhzpacket)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (status->is_ht)
+ rx_status->flag |= RX_FLAG_HT;
+
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+	/* hw will set status->decrypted true if it finds the
+	 * frame is an open data frame or a mgmt frame.
+	 * However, hw will not decrypt robust management frames
+	 * for IEEE 802.11w, yet it still sets status->decrypted
+	 * true; set it back to undecrypted for such frames so
+	 * that mac80211 can decrypt them in software.
+	 */
+ if (status->decrypted) {
+ if (!hdr) {
+ WARN_ON_ONCE(true);
+ pr_err("decrypted is true but hdr NULL in skb %p\n",
+ rtl_get_hdr(skb));
+ return false;
+ }
+
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
+ (ieee80211_has_protected(hdr->frame_control)))
+ rx_status->flag &= ~RX_FLAG_DECRYPTED;
+ else
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+
+	/* rate_idx: index of data rate into band's
+	 * supported rates, or MCS index if HT rates
+	 * are used (RX_FLAG_HT)
+	 * Notice: this differs from the Windows definition
+	 */
+ rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
+ status->rate);
+
+ rx_status->mactime = status->timestamp_low;
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data +
+ status->rx_bufshift);
+
+ _rtl8723be_translate_rx_signal_stuff(hw, skb, status,
+ pdesc, p_drvinfo);
+ }
+
+ /*rx_status->qual = status->signal; */
+ rx_status->signal = status->recvsignalpower + 10;
+ if (status->packet_report_type == TX_REPORT2) {
+ status->macid_valid_entry[0] =
+ GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ status->macid_valid_entry[1] =
+ GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ }
+ return true;
+}
+
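+/* fill a TX descriptor (rate, bandwidth, security, aggregation, DMA address) for a data frame */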
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 *pdesc = pdesc_tx;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ unsigned int buf_len = 0;
+ unsigned int skb_len = skb->len;
+ u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue);
+ bool firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ dma_addr_t mapping;
+ u8 bw_40 = 0;
+ u8 short_gi = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ bw_40 = mac->bw_40;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta)
+ bw_40 = sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+ /* reserve 8 byte for AMPDU early mode */
+ if (rtlhal->earlymode_enable) {
+ skb_push(skb, EM_HDR_LEN);
+ memset(skb->data, 0, EM_HDR_LEN);
+ }
+ buf_len = skb->len;
+ mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error");
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be));
+ if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+ firstseg = true;
+ lastseg = true;
+ }
+ if (firstseg) {
+ if (rtlhal->earlymode_enable) {
+ SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ EM_HDR_LEN);
+ if (ptcb_desc->empkt_num) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
+ _rtl8723be_insert_emcontent(ptcb_desc,
+ (u8 *)(skb->data));
+ }
+ } else {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ }
+
+ /* ptcb_desc->use_driver_rate = true; */
+ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+ short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+ else
+ short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
+
+ SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ?
+ 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
+ SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ?
+ 1 : 0));
+
+ SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+
+ SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc,
+ ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+ (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->rts_use_shortgi ? 1 : 0)));
+
+ if (ptcb_desc->btx_enable_sw_calc_duration)
+ SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+ if (bw_40) {
+ if (ptcb_desc->packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf =
+ info->control.hw_key;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+ }
+ }
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ 1 : 0);
+ SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+ }
+ SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+ SET_TX_DESC_BMC(pdesc, 1);
+ }
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+}
+
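+/* fill a minimal TX descriptor for command/beacon frames sent at 1M on the beacon queue */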
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 fw_queue = QSLT_BEACON;
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error");
+ return;
+ }
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+
+ SET_TX_DESC_SEQ(pdesc, 0);
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+
+ SET_TX_DESC_RATE_ID(pdesc, 0);
+ SET_TX_DESC_MACID(pdesc, 0);
+
+ SET_TX_DESC_OWN(pdesc, 1);
+
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+}
+
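+/* generic TX/RX descriptor field setter used by the common PCI code */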
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val)
+{
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+ break;
+ default:
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ }
+}
+
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, "ERR txdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+ desc_name);
+ break;
+ }
+ }
+ return ret;
+}
+
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN);
+
+	/* beacon packets only use the first descriptor
+	 * by default, and the OWN bit may not be
+	 * cleared by the hardware
+	 */
+ if (own)
+ return false;
+ else
+ return true;
+}
+
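+/* kick the hardware to poll the TX ring of the given queue */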
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (hw_queue == BEACON_QUEUE) {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ } else {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+ }
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
new file mode 100644
index 000000000000..102f33dcc988
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
@@ -0,0 +1,617 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TRX_H__
+#define __RTL8723BE_TRX_H__
+
+#define TX_DESC_SIZE 40
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 40
+#define CRCLENGTH 4
+
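+/* descriptor field accessors: (descriptor + byte offset, bit offset, bit width) in little-endian */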
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+
+#define SET_TX_DESC_PAID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+
+#define GET_RX_DESC_SEQ(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+
+/* TX report 2 format in Rx desc */
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \
+ LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) \
+ SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
+do { \
+ if (_size > TX_DESC_NEXT_DESC_OFFSET) \
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
+ else \
+ memset(__pdesc, 0, _size); \
+} while (0)
+
+struct phy_rx_agc_info_t {
+ #ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+};
+struct phy_status_rpt {
+ struct phy_rx_agc_info_t path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ char path_cfotail[2];
+ u8 pcts_mask[2];
+ char stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ u8 sig_evm;
+ u8 rsvd_3;
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8723be {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_8723be {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 rsvd0:2;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 agg_en:1;
+ u32 rdg_en:1;
+ u32 bar_retryht:2;
+ u32 agg_break:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 bt_int:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 cpu_handle:1;
+ u32 tag1:1;
+ u32 trigger_int:1;
+ u32 hwseq_en:1;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_ssn:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 pwr_status:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 sw_offset30:8;
+ u32 sw_offset31:4;
+ u32 rsvd1:1;
+ u32 antsel_c:1;
+ u32 null_0:1;
+ u32 null_1:1;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8723be {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:6;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *status,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+#endif
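
A note on the accessor macros above: each GET_/SET_ pair addresses one 32-bit little-endian word of the descriptor (the byte offset added to __pdesc), then a bit offset and a field width inside that word. The SET_BITS_TO_LE_4BYTE/LE_BITS_TO_4BYTE helpers themselves are defined elsewhere in rtlwifi and are not part of this patch; the standalone sketch below only illustrates the read-modify-write idea they rely on, and assumes a little-endian host for simplicity.

	#include <stdint.h>
	#include <string.h>

	/* Read a field of "bitlen" bits starting at "bitoff" from the
	 * little-endian 32-bit word at "start" (sketch only). */
	static uint32_t le_bits_to_4byte(const uint8_t *start,
					 unsigned int bitoff, unsigned int bitlen)
	{
		uint32_t dw;

		memcpy(&dw, start, 4);	/* assumes little-endian host */
		return (dw >> bitoff) & (0xFFFFFFFFu >> (32 - bitlen));
	}

	/* Read-modify-write the same field. */
	static void set_bits_to_le_4byte(uint8_t *start, unsigned int bitoff,
					 unsigned int bitlen, uint32_t val)
	{
		uint32_t dw, mask = (0xFFFFFFFFu >> (32 - bitlen)) << bitoff;

		memcpy(&dw, start, 4);
		dw = (dw & ~mask) | ((val << bitoff) & mask);
		memcpy(start, &dw, 4);
	}
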
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
new file mode 100644
index 000000000000..345a68adcf38
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
@@ -0,0 +1,9 @@
+rtl8723-common-objs := \
+ main.o \
+ dm_common.o \
+ fw_common.o \
+ phy_common.o
+
+obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
new file mode 100644
index 000000000000..4e254b72bf45
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "dm_common.h"
+#include "../rtl8723ae/dm.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower);
+
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo);
+
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
+ rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
+ rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
+ rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
+ rtlpriv->dm_pstable.rssi_val_min = 0;
+ rtlpriv->dm_pstable.initialize = 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving);
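
The three helpers above only reset dynamic-mechanism state; a chip-specific dm init is expected to call them in turn. A minimal sketch of such a caller follows (the function name is invented; it assumes the same ../wifi.h and dm_common.h headers as the file above):

	/* Hypothetical caller, for illustration only. */
	static void example_8723_dm_init(struct ieee80211_hw *hw)
	{
		rtl8723_dm_init_dynamic_txpower(hw);
		rtl8723_dm_init_edca_turbo(hw);
		rtl8723_dm_init_dynamic_bb_powersaving(hw);
	}
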
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
new file mode 100644
index 000000000000..5c1b94ce2f86
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __DM_COMMON_H__
+#define __DM_COMMON_H__
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw);
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
new file mode 100644
index 000000000000..540278ff462b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "fw_common.h"
+#include <linux/module.h>
+
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
+
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blocksize = sizeof(u32);
+ u8 *bufferptr = (u8 *)buffer;
+ u32 *pu4byteptr = (u32 *)buffer;
+ u32 i, offset, blockcount, remainsize;
+
+ blockcount = size / blocksize;
+ remainsize = size % blocksize;
+
+ for (i = 0; i < blockcount; i++) {
+ offset = i * blocksize;
+ rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+ *(pu4byteptr + i));
+ }
+ if (remainsize) {
+ offset = blockcount * blocksize;
+ bufferptr += offset;
+ for (i = 0; i < remainsize; i++) {
+ rtl_write_byte(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset + i),
+ *(bufferptr + i));
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
+
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ rtl8723_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
+
+static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+ *pfwlen = fwlen;
+}
+
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+ enum version_8723e version,
+ u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *bufferptr = buffer;
+ u32 pagenums, remainsize;
+ u32 page, offset;
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes\n", size);
+
+ rtl8723_fill_dummy(bufferptr, &size);
+
+ pagenums = size / FW_8192C_PAGE_SIZE;
+ remainsize = size % FW_8192C_PAGE_SIZE;
+
+ if (pagenums > 8) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Page numbers should not greater then 8\n");
+ }
+ for (page = 0; page < pagenums; page++) {
+ offset = page * FW_8192C_PAGE_SIZE;
+ rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192C_PAGE_SIZE);
+ }
+ if (remainsize) {
+ offset = pagenums * FW_8192C_PAGE_SIZE;
+ page = pagenums;
+ rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+ remainsize);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_write_fw);
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1tmp;
+ u8 delay = 100;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+ while (u1tmp & BIT(2)) {
+ delay--;
+ if (delay == 0)
+ break;
+ udelay(50);
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+ if (delay == 0) {
+ u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset);
+
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1b_tmp;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+ udelay(50);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0)));
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ " _8051Reset8723be(): 8051 reset success .\n");
+}
+EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
+
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = -EIO;
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "chksum report fail ! REG_MCUFWDL:0x%08x .\n",
+ value32);
+ goto exit;
+ }
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ if (is_8723be)
+ rtl8723be_firmware_selfreset(hw);
+ counter = 0;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ "Polling FW ready success!! "
+ "REG_MCUFWDL:0x%08x .\n",
+ value32);
+ err = 0;
+ goto exit;
+ }
+ udelay(FW_8192C_POLLING_DELAY);
+
+ } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
+ value32);
+
+exit:
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go);
+
+int rtl8723_download_fw(struct ieee80211_hw *hw,
+ bool is_8723be)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl92c_firmware_header *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8723e version = rtlhal->version;
+
+ if (!rtlhal->pfirmware)
+ return 1;
+
+ pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
+ fwsize = rtlhal->fwsize;
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
+
+ if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtl92c_firmware_header));
+
+ pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
+ fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ }
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ if (is_8723be)
+ rtl8723be_firmware_selfreset(hw);
+ else
+ rtl8723ae_firmware_selfreset(hw);
+ }
+ rtl8723_enable_fw_download(hw, true);
+ rtl8723_write_fw(hw, version, pfwdata, fwsize);
+ rtl8723_enable_fw_download(hw, false);
+
+ err = rtl8723_fw_free_to_go(hw, is_8723be);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "Firmware is not ready to run!\n");
+ } else {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware is ready to run!\n");
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_download_fw);
+
+bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ struct sk_buff *pskb = NULL;
+ u8 own;
+ unsigned long flags;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet);
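
For orientation, rtl8723_download_fw() above wraps the whole sequence: enable the MCU download path, write the image in FW_8192C_PAGE_SIZE (4096-byte) pages via rtl8723_fw_page_write(), disable the download path, then poll for the checksum and WLAN-init-ready bits. For example, a 14000-byte image needs no dummy padding (already a multiple of 4) and is written as three full 4096-byte pages plus a 1712-byte remainder as page 3. A hedged sketch of a call site (function name invented; headers as in fw_common.c above):

	/* Hypothetical call site, for illustration only. */
	static int example_load_firmware(struct ieee80211_hw *hw, bool is_8723be)
	{
		int err;

		/* enable download -> write 4 KiB pages -> poll checksum/ready */
		err = rtl8723_download_fw(hw, is_8723be);
		if (err)
			pr_err("rtl8723: firmware download did not start\n");
		return err;
	}
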
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
new file mode 100644
index 000000000000..cf1cc5804d06
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __FW_COMMON_H__
+#define __FW_COMMON_H__
+
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_MCUFWDL 0x0080
+#define FW_8192C_START_ADDRESS 0x1000
+#define FW_8192C_PAGE_SIZE 4096
+#define FW_8192C_POLLING_TIMEOUT_COUNT 6000
+#define FW_8192C_POLLING_DELAY 5
+
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_CHKSUM_RPT BIT(2)
+#define WINTINI_RDY BIT(6)
+
+#define REG_RSV_CTRL 0x001C
+#define REG_HMETFR 0x01CC
+
+enum version_8723e {
+ VERSION_TEST_UMC_CHIP_8723 = 0x0081,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
+ VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
+ VERSION_TEST_CHIP_1T1R_8723B = 0x0106,
+ VERSION_NORMAL_SMIC_CHIP_1T1R_8723B = 0x010E,
+ VERSION_UNKNOWN = 0xFF,
+};
+
+enum rtl8723ae_h2c_cmd {
+ H2C_AP_OFFLOAD = 0,
+ H2C_SETPWRMODE = 1,
+ H2C_JOINBSSRPT = 2,
+ H2C_RSVDPAGE = 3,
+ H2C_RSSI_REPORT = 4,
+ H2C_P2P_PS_CTW_CMD = 5,
+ H2C_P2P_PS_OFFLOAD = 6,
+ H2C_RA_MASK = 7,
+ MAX_H2CCMD
+};
+
+enum rtl8723be_cmd {
+ H2C_8723BE_RSVDPAGE = 0,
+ H2C_8723BE_JOINBSSRPT = 1,
+ H2C_8723BE_SCAN = 2,
+ H2C_8723BE_KEEP_ALIVE_CTRL = 3,
+ H2C_8723BE_DISCONNECT_DECISION = 4,
+ H2C_8723BE_INIT_OFFLOAD = 6,
+ H2C_8723BE_AP_OFFLOAD = 8,
+ H2C_8723BE_BCN_RSVDPAGE = 9,
+ H2C_8723BE_PROBERSP_RSVDPAGE = 10,
+
+ H2C_8723BE_SETPWRMODE = 0x20,
+ H2C_8723BE_PS_TUNING_PARA = 0x21,
+ H2C_8723BE_PS_TUNING_PARA2 = 0x22,
+ H2C_8723BE_PS_LPS_PARA = 0x23,
+ H2C_8723BE_P2P_PS_OFFLOAD = 0x24,
+
+ H2C_8723BE_WO_WLAN = 0x80,
+ H2C_8723BE_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8723BE_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8723BE_AOAC_RSVDPAGE = 0x83,
+ H2C_8723BE_RSSI_REPORT = 0x42,
+ H2C_8723BE_RA_MASK = 0x40,
+ H2C_8723BE_SELECTIVE_SUSPEND_ROF_CMD,
+ H2C_8723BE_P2P_PS_MODE,
+ H2C_8723BE_PSD_RESULT,
+ /*Not defined CTW CMD for P2P yet*/
+ H2C_8723BE_P2P_PS_CTW_CMD,
+ MAX_8723BE_H2CCMD
+};
+
+struct rtl92c_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodesize;
+ u16 rsvd2;
+ u32 svnindex;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size);
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size);
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+ enum version_8723e version,
+ u8 *buffer, u32 size);
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be);
+int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
new file mode 100644
index 000000000000..9014a94fac6a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
new file mode 100644
index 000000000000..d73b659bd2b5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
@@ -0,0 +1,434 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "phy_common.h"
+#include "../rtl8723ae/reg.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n",
+ bitmask, regaddr, originalvalue);
+
+ return returnvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
+
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
+
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
+
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0xff;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(2);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+ BLSSIREADBACKDATA);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]= 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
+ return retvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
+
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+ return;
+ }
+ offset &= 0xff;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]= 0x%x\n", rfpath,
+ pphyreg->rf3wire_offset, data_and_addr);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
+
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm);
+
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def);
+
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx,
+ u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2,
+ u32 msdelay)
+{
+ struct swchnlcmd *pcmd;
+
+ if (cmdtable == NULL) {
+ RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ return false;
+ }
+
+ if (cmdtableidx >= cmdtablesz)
+ return false;
+
+ pcmd = cmdtable + cmdtableidx;
+ pcmd->cmdid = cmdid;
+ pcmd->para1 = para1;
+ pcmd->para2 = para2;
+ pcmd->msdelay = msdelay;
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray);
+
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool btxonly)
+{
+ u32 oldval_0, x, tx0_a, reg;
+ long y, tx0_c;
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqk_ok) {
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][0];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx0_a = (x * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+ ((x * oldval_0 >> 7) & 0x1));
+ y = result[final_candidate][1];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx0_c = (y * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+ ((tx0_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+ (tx0_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+ ((y * oldval_0 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix);
+
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+ u32 *addabackup, u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers);
+
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers);
+
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers);
+
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers);
+
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+ bool is_patha_on, bool is2t)
+{
+ u32 pathon;
+ u32 i;
+
+ pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathon = 0x0bdb25a0;
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+ } else {
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on);
+
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i = 0;
+
+ rtl_write_byte(rtlpriv, macreg[i], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i],
+ (u8) (macbackup[i] & (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration);
+
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby);
+
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ u32 mode;
+
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch);
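
rtl8723_phy_calculate_bit_shift() above returns the index of the lowest set bit of a mask, which rtl8723_phy_query_bb_reg()/rtl8723_phy_set_bb_reg() use to align a field after masking. Worked example: bitmask 0x003F0000 gives shift 16, so a register value of 0x12345678 is extracted as (0x12345678 & 0x003F0000) >> 16 = 0x34. The standalone sketch below restates that arithmetic outside the driver:

	static unsigned int example_bit_shift(unsigned int bitmask)
	{
		unsigned int i;

		/* same scan as rtl8723_phy_calculate_bit_shift() above */
		for (i = 0; i <= 31; i++)
			if ((bitmask >> i) & 0x1)
				break;
		return i;
	}

	static unsigned int example_extract_field(unsigned int regval,
						  unsigned int bitmask)
	{
		/* mirrors rtl8723_phy_query_bb_reg(): mask, then shift down */
		return (regval & bitmask) >> example_bit_shift(bitmask);
	}
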
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
new file mode 100644
index 000000000000..83b891a9adb8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __PHY_COMMON__
+#define __PHY_COMMON__
+
+#define RT_CANNOT_IO(hw) false
+
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data);
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask);
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data);
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx,
+ u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2,
+ u32 msdelay);
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool btxonly);
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+ u32 *addabackup, u32 registernum);
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum);
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+ bool is_patha_on, bool is2t);
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw);
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode);
+
+#endif
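
rtl8723_phy_set_sw_chnl_cmdarray(), declared above, fills one slot of a software channel-switch command table. A hypothetical two-entry table as illustration (the register address 0x18, the delay value, and the function name are assumptions, not taken from this patch; the u8/u32 types come from ../wifi.h):

	static void example_build_chnl_table(struct swchnlcmd *tbl, u32 tblsz,
					     u8 channel)
	{
		u32 idx = 0;

		/* program an RF channel/bandwidth register (address assumed) ... */
		rtl8723_phy_set_sw_chnl_cmdarray(tbl, idx++, tblsz,
						 CMDID_RF_WRITEREG, 0x18,
						 channel, 10);
		/* ... then terminate the table */
		rtl8723_phy_set_sw_chnl_cmdarray(tbl, idx++, tblsz,
						 CMDID_END, 0, 0, 0);
	}
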
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 4933f02ce1d5..0398d3ea15b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -410,7 +410,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
mac->current_ampdu_factor = 3;
/* QOS */
- rtlusb->acm_method = eAcmWay2_SW;
+ rtlusb->acm_method = EACMWAY2_SW;
/* IRQ */
/* HIMR - turn all on */
@@ -994,7 +994,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
seq_number += 1;
seq_number <<= 4;
}
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
hw_queue, &tcb_desc);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
if (qc)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 8c647391bedf..6965afdf572a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -41,6 +41,38 @@
#include <linux/completion.h>
#include "debug.h"
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
+
#define RF_CHANGE_BY_INIT 0
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_PS BIT(29)
@@ -49,6 +81,7 @@
#define IQK_ADDA_REG_NUM 16
#define IQK_MAC_REG_NUM 4
+#define IQK_THRESHOLD 8
#define MAX_KEY_LEN 61
#define KEY_BUF_SIZE 5
@@ -86,7 +119,18 @@
#define MAC80211_4ADDR_LEN 30
#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
+#define CHANNEL_MAX_NUMBER_2G 14
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+ *"phy_GetChnlGroup8812A" and
+ * "Hal_ReadTxPowerInfo8812A"
+ */
+#define CHANNEL_MAX_NUMBER_5G_80M 7
#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
#define MAX_PG_GROUP 13
#define CHANNEL_GROUP_MAX_2G 3
#define CHANNEL_GROUP_IDX_5GL 3
@@ -96,6 +140,7 @@
#define CHANNEL_MAX_NUMBER_2G 14
#define AVG_THERMAL_NUM 8
#define AVG_THERMAL_NUM_88E 4
+#define AVG_THERMAL_NUM_8723BE 4
#define MAX_TID_COUNT 9
/* for early mode */
@@ -107,6 +152,24 @@
#define MAX_CHNL_GROUP_24G 6
#define MAX_CHNL_GROUP_5G 14
+#define TX_PWR_BY_RATE_NUM_BAND 2
+#define TX_PWR_BY_RATE_NUM_RF 4
+#define TX_PWR_BY_RATE_NUM_SECTION 12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
+
+#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+#define DEL_SW_IDX_SZ 30
+#define BAND_NUM 3
+
+enum rf_tx_num {
+ RF_1TX = 0,
+ RF_2TX,
+ RF_MAX_TX_NUM,
+ RF_TX_NUM_NONIMPLEMENT,
+};
+
struct txpower_info_2g {
u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -115,6 +178,8 @@ struct txpower_info_2g {
u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
};
struct txpower_info_5g {
@@ -123,6 +188,17 @@ struct txpower_info_5g {
u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+ u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+enum rate_section {
+ CCK = 0,
+ OFDM,
+ HT_MCS0_MCS7,
+ HT_MCS8_MCS15,
+ VHT_1SSMCS0_1SSMCS9,
+ VHT_2SSMCS0_2SSMCS9,
};
enum intf_type {
@@ -158,7 +234,10 @@ enum hardware_type {
HARDWARE_TYPE_RTL8192DU,
HARDWARE_TYPE_RTL8723AE,
HARDWARE_TYPE_RTL8723U,
+ HARDWARE_TYPE_RTL8723BE,
HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8821AE,
+ HARDWARE_TYPE_RTL8812AE,
/* keep it last */
HARDWARE_TYPE_NUM
@@ -195,8 +274,16 @@ enum hardware_type {
_pdesc->rxmcs == DESC92_RATE5_5M || \
_pdesc->rxmcs == DESC92_RATE11M)
+#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
+ ((rxmcs) == DESC92_RATE1M || \
+ (rxmcs) == DESC92_RATE2M || \
+ (rxmcs) == DESC92_RATE5_5M || \
+ (rxmcs) == DESC92_RATE11M)
+
enum scan_operation_backup_opt {
SCAN_OPT_BACKUP = 0,
+ SCAN_OPT_BACKUP_BAND0 = 0,
+ SCAN_OPT_BACKUP_BAND1,
SCAN_OPT_RESTORE,
SCAN_OPT_MAX
};
@@ -231,7 +318,9 @@ struct bb_reg_def {
enum io_type {
IO_CMD_PAUSE_DM_BY_SCAN = 0,
- IO_CMD_RESUME_DM_BY_SCAN = 1,
+ IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+ IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+ IO_CMD_RESUME_DM_BY_SCAN = 2,
};
enum hw_variables {
@@ -298,6 +387,7 @@ enum hw_variables {
HW_VAR_SET_RPWM,
HW_VAR_H2C_FW_PWRMODE,
HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT,
HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
HW_VAR_FW_PSMODE_STATUS,
HW_VAR_RESUME_CLK_ON,
@@ -330,6 +420,8 @@ enum hw_variables {
HAL_DEF_WOWLAN,
HW_VAR_MRC,
+ HW_VAR_KEEP_ALIVE,
+ HW_VAR_NAV_UPPER,
HW_VAR_MGT_FILTER,
HW_VAR_CTRL_FILTER,
@@ -348,34 +440,34 @@ enum rt_oem_id {
RT_CID_8187_HW_LED = 3,
RT_CID_8187_NETGEAR = 4,
RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
+ RT_CID_819X_CAMEO = 6,
+ RT_CID_819X_RUNTOP = 7,
+ RT_CID_819X_SENAO = 8,
RT_CID_TOSHIBA = 9,
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
+ RT_CID_819X_NETCORE = 10,
+ RT_CID_NETTRONIX = 11,
RT_CID_DLINK = 12,
RT_CID_PRONET = 13,
RT_CID_COREGA = 14,
- RT_CID_819x_ALPHA = 15,
- RT_CID_819x_Sitecom = 16,
+ RT_CID_819X_ALPHA = 15,
+ RT_CID_819X_SITECOM = 16,
RT_CID_CCX = 17,
- RT_CID_819x_Lenovo = 18,
- RT_CID_819x_QMI = 19,
- RT_CID_819x_Edimax_Belkin = 20,
- RT_CID_819x_Sercomm_Belkin = 21,
- RT_CID_819x_CAMEO1 = 22,
- RT_CID_819x_MSI = 23,
- RT_CID_819x_Acer = 24,
- RT_CID_819x_HP = 27,
- RT_CID_819x_CLEVO = 28,
- RT_CID_819x_Arcadyan_Belkin = 29,
- RT_CID_819x_SAMSUNG = 30,
- RT_CID_819x_WNC_COREGA = 31,
- RT_CID_819x_Foxcoon = 32,
- RT_CID_819x_DELL = 33,
- RT_CID_819x_PRONETS = 34,
- RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_819X_LENOVO = 18,
+ RT_CID_819X_QMI = 19,
+ RT_CID_819X_EDIMAX_BELKIN = 20,
+ RT_CID_819X_SERCOMM_BELKIN = 21,
+ RT_CID_819X_CAMEO1 = 22,
+ RT_CID_819X_MSI = 23,
+ RT_CID_819X_ACER = 24,
+ RT_CID_819X_HP = 27,
+ RT_CID_819X_CLEVO = 28,
+ RT_CID_819X_ARCADYAN_BELKIN = 29,
+ RT_CID_819X_SAMSUNG = 30,
+ RT_CID_819X_WNC_COREGA = 31,
+ RT_CID_819X_FOXCOON = 32,
+ RT_CID_819X_DELL = 33,
+ RT_CID_819X_PRONETS = 34,
+ RT_CID_819X_EDIMAX_ASUS = 35,
RT_CID_NETGEAR = 36,
RT_CID_PLANEX = 37,
RT_CID_CC_C = 38,
@@ -389,6 +481,7 @@ enum hw_descs {
HW_DESC_RXBUFF_ADDR,
HW_DESC_RXPKT_LEN,
HW_DESC_RXERO,
+ HW_DESC_RX_PREPARE,
};
enum prime_sc {
@@ -407,6 +500,7 @@ enum rf_type {
enum ht_channel_width {
HT_CHANNEL_WIDTH_20 = 0,
HT_CHANNEL_WIDTH_20_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
};
/* Ref: 802.11i sepc D10.0 7.3.2.25.1
@@ -471,6 +565,9 @@ enum rtl_var_map {
MAC_RCR_ACRC32,
MAC_RCR_ACF,
MAC_RCR_AAP,
+ MAC_HIMR,
+ MAC_HIMRE,
+ MAC_HSISR,
/*efuse map */
EFUSE_TEST,
@@ -608,7 +705,7 @@ enum rtl_led_pin {
enum acm_method {
eAcmWay0_SwAndHw = 0,
eAcmWay1_HW = 1,
- eAcmWay2_SW = 2,
+ EACMWAY2_SW = 2,
};
enum macphy_mode {
@@ -645,7 +742,9 @@ enum wireless_mode {
WIRELESS_MODE_G = 0x04,
WIRELESS_MODE_AUTO = 0x08,
WIRELESS_MODE_N_24G = 0x10,
- WIRELESS_MODE_N_5G = 0x20
+ WIRELESS_MODE_N_5G = 0x20,
+ WIRELESS_MODE_AC_5G = 0x40,
+ WIRELESS_MODE_AC_24G = 0x80
};
#define IS_WIRELESS_MODE_A(wirelessmode) \
@@ -669,6 +768,8 @@ enum ratr_table_mode {
RATR_INX_WIRELESS_B = 6,
RATR_INX_WIRELESS_MC = 7,
RATR_INX_WIRELESS_A = 8,
+ RATR_INX_WIRELESS_AC_5N = 8,
+ RATR_INX_WIRELESS_AC_24N = 9,
};
enum rtl_link_state {
@@ -803,8 +904,12 @@ struct wireless_stats {
long signal_strength;
u8 rx_rssi_percentage[4];
+ u8 rx_evm_dbm[4];
u8 rx_evm_percentage[2];
+ u16 rx_cfo_short[4];
+ u16 rx_cfo_tail[4];
+
struct rt_smooth_data ui_rssi;
struct rt_smooth_data ui_link_quality;
};
@@ -817,9 +922,9 @@ struct rate_adaptive {
u32 high_rssi_thresh_for_ra;
u32 high2low_rssi_thresh_for_ra;
u8 low2high_rssi_thresh_for_ra40m;
- u32 low_rssi_thresh_for_ra40M;
+ u32 low_rssi_thresh_for_ra40m;
u8 low2high_rssi_thresh_for_ra20m;
- u32 low_rssi_thresh_for_ra20M;
+ u32 low_rssi_thresh_for_ra20m;
u32 upper_rssi_threshold_ratr;
u32 middleupper_rssi_threshold_ratr;
u32 middle_rssi_threshold_ratr;
@@ -833,6 +938,10 @@ struct rate_adaptive {
u32 ping_rssi_thresh_for_ra;
u32 last_ratr;
u8 pre_ratr_state;
+ u8 ldpc_thres;
+ bool use_ldpc;
+ bool lower_rts_rate;
+ bool is_special_data;
};
struct regd_pair_mapping {
@@ -841,6 +950,16 @@ struct regd_pair_mapping {
u16 reg_2ghz_ctl;
};
+struct dynamic_primary_cca {
+ u8 pricca_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 dup_rts_flag;
+ u8 monitor_flag;
+ u8 ch_offset;
+ u8 mf_state;
+};
+
struct rtl_regulatory {
char alpha2[2];
u16 country_code;
@@ -976,16 +1095,29 @@ struct rtl_phy {
u32 iqk_bb_backup[10];
bool iqk_initialized;
+ bool rfpath_rx_enable[MAX_RF_PATH];
+ u8 reg_837;
/* Dual mac */
bool need_iqk;
struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
bool rfpi_enable;
+ bool iqk_in_progress;
u8 pwrgroup_cnt;
u8 cck_high_power;
/* MAX_PG_GROUP groups of pwr diff by rates */
u32 mcs_offset[MAX_PG_GROUP][16];
+ u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_SECTION];
+ u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+ u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
u8 default_initialgain[4];
/* the current Tx power level */
@@ -998,6 +1130,7 @@ struct rtl_phy {
bool apk_done;
u32 reg_rf3c[2]; /* pathA / pathB */
+ u32 backup_rf_0x1a;/*92ee*/
/* bfsync */
u8 framesync;
u32 framesync_c34;
@@ -1006,6 +1139,7 @@ struct rtl_phy {
struct phy_parameters hwparam_tables[MAX_TAB];
u16 rf_pathmap;
+ u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
enum rt_polarity_ctl polarity_ctl;
};
@@ -1133,6 +1267,7 @@ struct rtl_mac {
u8 use_cts_protect;
u8 cur_40_prime_sc;
u8 cur_40_prime_sc_bk;
+ u8 cur_80_prime_sc;
u64 tsf;
u8 retry_short;
u8 retry_long;
@@ -1213,6 +1348,7 @@ struct rtl_hal {
bool being_init_adapter;
bool bbrf_ready;
bool mac_func_enable;
+ bool pre_edcca_enable;
struct bt_coexist_8723 hal_coex_8723;
enum intf_type interface;
@@ -1234,6 +1370,7 @@ struct rtl_hal {
/*Reserve page start offset except beacon in TxQ. */
u8 fw_rsvdpage_startoffset;
u8 h2c_txcmd_seq;
+ u8 current_ra_rate;
/* FW Cmd IO related */
u16 fwcmd_iomap;
@@ -1273,6 +1410,9 @@ struct rtl_hal {
bool disable_amsdu_8k;
bool master_of_dmsp;
bool slave_of_dmsp;
+
+ u16 rx_tag;/*for 92ee*/
+ u8 rts_en;
};
struct rtl_security {
@@ -1321,6 +1461,16 @@ struct fast_ant_training {
bool becomelinked;
};
+struct dm_phy_dbg_info {
+ char rx_snrdb[4];
+ u64 num_qry_phy_status;
+ u64 num_qry_phy_status_cck;
+ u64 num_qry_phy_status_ofdm;
+ u16 num_qry_beacon_pkt;
+ u16 num_non_be_pkt;
+ s32 rx_evm[4];
+};
+
struct rtl_dm {
/*PHY status for Dynamic Management */
long entry_min_undec_sm_pwdb;
@@ -1360,29 +1510,84 @@ struct rtl_dm {
u8 txpower_track_control;
bool interrupt_migration;
bool disable_tx_int;
- char ofdm_index[2];
+ char ofdm_index[MAX_RF_PATH];
+ u8 default_ofdm_index;
+ u8 default_cck_index;
char cck_index;
- char delta_power_index;
- char delta_power_index_last;
- char power_index_offset;
+ char delta_power_index[MAX_RF_PATH];
+ char delta_power_index_last[MAX_RF_PATH];
+ char power_index_offset[MAX_RF_PATH];
+ char absolute_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_ofdm_swing_idx[MAX_RF_PATH];
+ char remnant_cck_idx;
+ bool modify_txagc_flag_path_a;
+ bool modify_txagc_flag_path_b;
+
+ bool one_entry_only;
+ struct dm_phy_dbg_info dbginfo;
+
+ /* Dynamic ATC switch */
+ bool atc_status;
+ bool large_cfo_hit;
+ bool is_freeze;
+ int cfo_tail[2];
+ int cfo_ave_pre;
+ int crystal_cap;
+ u8 cfo_threshold;
+ u32 packet_count;
+ u32 packet_count_pre;
+ u8 tx_rate;
/*88e tx power tracking*/
- u8 swing_idx_ofdm[2];
+ u8 swing_idx_ofdm[MAX_RF_PATH];
u8 swing_idx_ofdm_cur;
- u8 swing_idx_ofdm_base;
+ u8 swing_idx_ofdm_base[MAX_RF_PATH];
bool swing_flag_ofdm;
u8 swing_idx_cck;
u8 swing_idx_cck_cur;
u8 swing_idx_cck_base;
bool swing_flag_cck;
+ char swing_diff_2g;
+ char swing_diff_5g;
+
+ u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gcckb_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gcckb_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gb_p[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24gb_n[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5ga_p[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5ga_n[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5gb_p[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_5gb_n[BAND_NUM][DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_p_8188e[DEL_SW_IDX_SZ];
+ u8 delta_swing_table_idx_24ga_n_8188e[DEL_SW_IDX_SZ];
+
/* DMSP */
bool supp_phymode_switch;
+ /* Dual mac */
struct fast_ant_training fat_table;
+
+ u8 resp_tx_path;
+ u8 path_sel;
+ u32 patha_sum;
+ u32 pathb_sum;
+ u32 patha_cnt;
+ u32 pathb_cnt;
+
+ u8 pre_channel;
+ u8 *p_channel;
+ u8 linked_interval;
+
+ u64 last_tx_ok_cnt;
+ u64 last_rx_ok_cnt;
};
-#define EFUSE_MAX_LOGICAL_SIZE 256
+#define EFUSE_MAX_LOGICAL_SIZE 512
struct rtl_efuse {
bool autoLoad_ok;
@@ -1422,12 +1627,9 @@ struct rtl_efuse {
u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
- u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
- u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
- u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
- u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
- u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
- u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+ u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX];
+ u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX];
u8 internal_pa_5g[2]; /* pathA / pathB */
u8 eeprom_c9;
@@ -1438,9 +1640,38 @@ struct rtl_efuse {
u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
- char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
- /*For HT<->legacy pwr diff*/
- u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+ u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+ /*--------------------------------------------------------*
+ * 8192CE\8192SE\8192DE\8723AE use the following 4 arrays,
+ * other ICs (8188EE\8723BE\8192EE\8812AE...)
+ * define new arrays in Windows code.
+ * BUT, in Linux code, we use the same array for all ICs.
+ *
+ * The correspondence between the two sets of arrays is:
+ * txpwr_cckdiff[][] == CCK_24G_Diff[][]
+ * txpwr_ht20diff[][] == BW20_24G_Diff[][]
+ * txpwr_ht40diff[][] == BW40_24G_Diff[][]
+ * txpwr_legacyhtdiff[][] == OFDM_24G_Diff[][]
+ *
+ * The sizes of these arrays are decided by the larger of the two.
+ */
+ char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+ u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+ char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+ char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
u8 txpwr_safetyflag; /* Band edge enable flag */
u16 eeprom_txpowerdiff;
u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
@@ -1571,7 +1802,9 @@ struct rtl_stats {
bool rx_is40Mhzpacket;
u32 rx_pwdb_all;
u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
- s8 rx_mimo_sig_qual[2];
+ s8 rx_mimo_sig_qual[4];
+ u8 rx_pwr[4]; /* per-path's pwdb */
+ u8 rx_snr[4]; /* per-path's SNR */
bool packet_matchbssid;
bool is_cck;
bool is_ht;
@@ -1644,6 +1877,8 @@ struct rtl_tcb_desc {
bool btx_enable_sw_calc_duration;
};
+struct rtl92c_firmware_header;
+
struct rtl_hal_ops {
int (*init_sw_vars) (struct ieee80211_hw *hw);
void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -1673,9 +1908,17 @@ struct rtl_hal_ops {
void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
void (*update_rate_tbl) (struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u8 rssi_level);
+ void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
+ u8 *desc, u8 queue_index,
+ struct sk_buff *skb, dma_addr_t addr);
void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
+ u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
+ u8 queue_index);
+ void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
+ u8 queue_index);
void (*fill_tx_desc) (struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
@@ -1698,8 +1941,11 @@ struct rtl_hal_ops {
enum rf_pwrstate rfpwr_state);
void (*led_control) (struct ieee80211_hw *hw,
enum led_ctl_mode ledaction);
- void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+ void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+ bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
void (*enable_hw_sec) (struct ieee80211_hw *hw);
void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1738,6 +1984,10 @@ struct rtl_hal_ops {
void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
u32 cmd_len, u8 *p_cmdbuffer);
+ bool (*get_btc_status) (void);
+ bool (*is_fw_header) (struct rtl92c_firmware_header *hdr);
+ u32 (*rx_command_packet)(struct ieee80211_hw *hw,
+ struct rtl_stats status, struct sk_buff *skb);
};
struct rtl_intf_ops {
@@ -1847,6 +2097,8 @@ struct rtl_locks {
/*Easy concurrent*/
spinlock_t check_sendpkt_lock;
+
+ spinlock_t iqk_lock;
};
struct rtl_works {
@@ -1915,6 +2167,7 @@ struct ps_t {
u8 cur_ccasate;
u8 pre_rfstate;
u8 cur_rfstate;
+ u8 initialize;
long rssi_val_min;
};
@@ -1939,6 +2192,7 @@ struct dig_t {
u8 cursta_cstate;
u8 presta_cstate;
u8 curmultista_cstate;
+ u8 stop_dig;
char back_val;
char back_range_max;
char back_range_min;
@@ -1956,6 +2210,7 @@ struct dig_t {
u8 cur_ccasate;
u8 large_fa_hit;
u8 dig_dynamic_min;
+ u8 dig_dynamic_min_1;
u8 forbidden_igi;
u8 dig_state;
u8 dig_highpwrstate;
@@ -1972,6 +2227,7 @@ struct dig_t {
char backoffval_range_min;
u8 dig_min_0;
u8 dig_min_1;
+ u8 bt30_cur_igi;
bool media_connect_0;
bool media_connect_1;
@@ -1986,6 +2242,96 @@ struct rtl_global_var {
spinlock_t glb_list_lock;
};
+struct rtl_btc_info {
+ u8 bt_type;
+ u8 btcoexist;
+ u8 ant_num;
+};
+
+struct bt_coexist_info {
+ struct rtl_btc_ops *btc_ops;
+ struct rtl_btc_info btc_info;
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isol;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:on, 1:off */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool init_set;
+ bool bt_busy_traffic;
+ bool bt_traffic_mode_set;
+ bool bt_non_traffic_mode_set;
+
+ bool fw_coexist_all_off;
+ bool sw_coexist_all_off;
+ bool hw_coexist_all_off;
+ u32 cstate;
+ u32 previous_state;
+ u32 cstate_h;
+ u32 previous_state_h;
+
+ u8 bt_pre_rssi_state;
+ u8 bt_pre_rssi_state1;
+
+ u8 reg_bt_iso;
+ u8 reg_bt_sco;
+ bool balance_on;
+ u8 bt_active_zero_cnt;
+ bool cur_bt_disabled;
+ bool pre_bt_disabled;
+
+ u8 bt_profile_case;
+ u8 bt_profile_action;
+ bool bt_busy;
+ bool hold_for_bt_operation;
+ u8 lps_counter;
+};
+
+struct rtl_btc_ops {
+ void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+ void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+ void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+ void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+ void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
+ void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
+ enum _RT_MEDIA_STATUS mstatus);
+ void (*btc_periodical) (struct rtl_priv *rtlpriv);
+ void (*btc_halt_notify) (void);
+ void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
+ u8 *tmp_buf, u8 length);
+ bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
+ bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+};
+
+struct proxim {
+ bool proxim_on;
+
+ void *proximity_priv;
+ int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+ struct sk_buff *skb);
+ u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
@@ -2008,6 +2354,7 @@ struct rtl_priv {
struct rtl_ps_ctl psc;
struct rate_adaptive ra;
+ struct dynamic_primary_cca primarycca;
struct wireless_stats stats;
struct rt_link_detect link_info;
struct false_alarm_statistics falsealm_cnt;
@@ -2048,6 +2395,20 @@ struct rtl_priv {
bool enter_ps; /* true when entering PS */
u8 rate_mask[5];
+ /* Intel Proximity: memory should be allocated
+ * by the Intel Proximity module and can only
+ * be used in Intel Proximity mode.
+ */
+ struct proxim proximity;
+
+ /*for bt coexist use*/
+ struct bt_coexist_info btcoexist;
+
+ /* separate the 92ee from other ICs;
+ * the 92ee uses the new TRX flow.
+ */
+ bool use_new_trx_flow;
+
/*This must be the last item so
that it points to the data allocated
beyond this structure like:
@@ -2079,6 +2440,15 @@ enum bt_co_type {
BT_CSR_BC8 = 4,
BT_RTL8756 = 5,
BT_RTL8723A = 6,
+ BT_RTL8821A = 7,
+ BT_RTL8723B = 8,
+ BT_RTL8192E = 9,
+ BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
+ ANT_TOTAL_X2 = 0,
+ ANT_TOTAL_X1 = 1
};
enum bt_cur_state {
@@ -2104,62 +2474,6 @@ enum bt_radio_shared {
BT_RADIO_INDIVIDUAL = 1,
};
-struct bt_coexist_info {
-
- /* EEPROM BT info. */
- u8 eeprom_bt_coexist;
- u8 eeprom_bt_type;
- u8 eeprom_bt_ant_num;
- u8 eeprom_bt_ant_isol;
- u8 eeprom_bt_radio_shared;
-
- u8 bt_coexistence;
- u8 bt_ant_num;
- u8 bt_coexist_type;
- u8 bt_state;
- u8 bt_cur_state; /* 0:on, 1:off */
- u8 bt_ant_isolation; /* 0:good, 1:bad */
- u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
- u8 bt_service;
- u8 bt_radio_shared_type;
- u8 bt_rfreg_origin_1e;
- u8 bt_rfreg_origin_1f;
- u8 bt_rssi_state;
- u32 ratio_tx;
- u32 ratio_pri;
- u32 bt_edca_ul;
- u32 bt_edca_dl;
-
- bool init_set;
- bool bt_busy_traffic;
- bool bt_traffic_mode_set;
- bool bt_non_traffic_mode_set;
-
- bool fw_coexist_all_off;
- bool sw_coexist_all_off;
- bool hw_coexist_all_off;
- u32 cstate;
- u32 previous_state;
- u32 cstate_h;
- u32 previous_state_h;
-
- u8 bt_pre_rssi_state;
- u8 bt_pre_rssi_state1;
-
- u8 reg_bt_iso;
- u8 reg_bt_sco;
- bool balance_on;
- u8 bt_active_zero_cnt;
- bool cur_bt_disabled;
- bool pre_bt_disabled;
-
- u8 bt_profile_case;
- u8 bt_profile_action;
- bool bt_busy;
- bool hold_for_bt_operation;
- u8 lps_counter;
-};
-
/****************************************
mem access macro define start
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c
index 998e95895f9d..a92bd3e89796 100644
--- a/drivers/net/wireless/ti/wilink_platform_data.c
+++ b/drivers/net/wireless/ti/wilink_platform_data.c
@@ -23,17 +23,17 @@
#include <linux/err.h>
#include <linux/wl12xx.h>
-static struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *wl12xx_platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
- if (platform_data)
+ if (wl12xx_platform_data)
return -EBUSY;
if (!data)
return -EINVAL;
- platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
- if (!platform_data)
+ wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!wl12xx_platform_data)
return -ENOMEM;
return 0;
@@ -41,9 +41,34 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
- if (!platform_data)
+ if (!wl12xx_platform_data)
return ERR_PTR(-ENODEV);
- return platform_data;
+ return wl12xx_platform_data;
}
EXPORT_SYMBOL(wl12xx_get_platform_data);
+
+static struct wl1251_platform_data *wl1251_platform_data;
+
+int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
+{
+ if (wl1251_platform_data)
+ return -EBUSY;
+ if (!data)
+ return -EINVAL;
+
+ wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+ if (!wl1251_platform_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct wl1251_platform_data *wl1251_get_platform_data(void)
+{
+ if (!wl1251_platform_data)
+ return ERR_PTR(-ENODEV);
+
+ return wl1251_platform_data;
+}
+EXPORT_SYMBOL(wl1251_get_platform_data);
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 223649bcaa5a..bf1fa18b9786 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -448,7 +448,7 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
* Note: This bug may be caused by the fw's DTIM handling.
*/
if (is_zero_ether_addr(wl->bssid))
- cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
+ cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index e2b3d9c541e8..b661f896e9fe 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -28,6 +28,7 @@
#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
#include "wl1251.h"
@@ -182,8 +183,9 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
* callback in case it wants to do any additional setup,
* for example enabling clock buffer for the module.
*/
- if (wl->set_power)
- wl->set_power(true);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, true);
+
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
@@ -203,8 +205,8 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
if (ret < 0)
goto out;
- if (wl->set_power)
- wl->set_power(false);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, false);
}
out:
@@ -227,7 +229,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct wl1251 *wl;
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
- const struct wl12xx_platform_data *wl12xx_board_data;
+ const struct wl1251_platform_data *wl1251_board_data;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -254,11 +256,20 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
- wl12xx_board_data = wl12xx_get_platform_data();
- if (!IS_ERR(wl12xx_board_data)) {
- wl->set_power = wl12xx_board_data->set_power;
- wl->irq = wl12xx_board_data->irq;
- wl->use_eeprom = wl12xx_board_data->use_eeprom;
+ wl1251_board_data = wl1251_get_platform_data();
+ if (!IS_ERR(wl1251_board_data)) {
+ wl->power_gpio = wl1251_board_data->power_gpio;
+ wl->irq = wl1251_board_data->irq;
+ wl->use_eeprom = wl1251_board_data->use_eeprom;
+ }
+
+ if (gpio_is_valid(wl->power_gpio)) {
+ ret = devm_gpio_request(&func->dev, wl->power_gpio,
+ "wl1251 power");
+ if (ret) {
+ wl1251_error("Failed to request gpio: %d\n", ret);
+ goto disable;
+ }
}
if (wl->irq) {
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 1342f81e683d..b06d36d99362 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -26,6 +26,10 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
#include "wl1251.h"
#include "reg.h"
@@ -221,8 +225,8 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
{
- if (wl->set_power)
- wl->set_power(enable);
+ if (gpio_is_valid(wl->power_gpio))
+ gpio_set_value(wl->power_gpio, enable);
return 0;
}
@@ -238,13 +242,13 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
static int wl1251_spi_probe(struct spi_device *spi)
{
- struct wl12xx_platform_data *pdata;
+ struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct device_node *np = spi->dev.of_node;
struct ieee80211_hw *hw;
struct wl1251 *wl;
int ret;
- pdata = dev_get_platdata(&spi->dev);
- if (!pdata) {
+ if (!np && !pdata) {
wl1251_error("no platform data");
return -ENODEV;
}
@@ -271,22 +275,42 @@ static int wl1251_spi_probe(struct spi_device *spi)
goto out_free;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1251_error("set power function missing in platform data");
- return -ENODEV;
+ if (np) {
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ } else if (pdata) {
+ wl->power_gpio = pdata->power_gpio;
+ wl->use_eeprom = pdata->use_eeprom;
+ }
+
+ if (wl->power_gpio == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_free;
+ }
+
+ if (gpio_is_valid(wl->power_gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, wl->power_gpio,
+ GPIOF_OUT_INIT_LOW, "wl1251 power");
+ if (ret) {
+ wl1251_error("Failed to request gpio: %d\n", ret);
+ goto out_free;
+ }
+ } else {
+ wl1251_error("set power gpio missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
}
wl->irq = spi->irq;
if (wl->irq < 0) {
wl1251_error("irq missing in platform data");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free;
}
- wl->use_eeprom = pdata->use_eeprom;
-
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
- ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
+ ret = devm_request_irq(&spi->dev, wl->irq, wl1251_irq, 0,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
goto out_free;
@@ -294,16 +318,26 @@ static int wl1251_spi_probe(struct spi_device *spi)
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ wl->vio = devm_regulator_get(&spi->dev, "vio");
+ if (IS_ERR(wl->vio)) {
+ ret = PTR_ERR(wl->vio);
+ wl1251_error("vio regulator missing: %d", ret);
+ goto out_free;
+ }
+
+ ret = regulator_enable(wl->vio);
+ if (ret)
+ goto out_free;
+
ret = wl1251_init_ieee80211(wl);
if (ret)
- goto out_irq;
+ goto disable_regulator;
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
+disable_regulator:
+ regulator_disable(wl->vio);
+out_free:
ieee80211_free_hw(hw);
return ret;
@@ -315,6 +349,7 @@ static int wl1251_spi_remove(struct spi_device *spi)
free_irq(wl->irq, wl);
wl1251_free_hw(wl);
+ regulator_disable(wl->vio);
return 0;
}
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 235617a7716d..16dae5269175 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -276,10 +276,12 @@ struct wl1251 {
void *if_priv;
const struct wl1251_if_operations *if_ops;
- void (*set_power)(bool enable);
+ int power_gpio;
int irq;
bool use_eeprom;
+ struct regulator *vio;
+
spinlock_t wl_lock;
enum wl1251_state state;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index be7129ba16ad..d50dfac91631 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status_1->tx_results_counter ==
+ if (wl->fw_status->tx_results_counter ==
(wl->tx_results_count & 0xff))
return 0;
@@ -1438,6 +1438,37 @@ out:
return ret;
}
+static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl12xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+}
+
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
.tx_delayed_compl = wl12xx_tx_delayed_compl,
.hw_init = wl12xx_hw_init,
.init_vif = NULL,
+ .convert_fw_status = wl12xx_convert_fw_status,
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
},
};
+static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl12xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl12xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
+ .num_different_channels = 1,
+ },
+};
+
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
+ BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
+
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 1;
+ wl->num_links = WL12XX_MAX_LINKS;
+ wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl12xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl12xx_fw_status);
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wl->ofdm_only_ap = true;
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 9e5484a73667..75c92658bfea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -65,6 +65,9 @@
#define WL12XX_RX_BA_MAX_SESSIONS 3
+#define WL12XX_MAX_AP_STATIONS 8
+#define WL12XX_MAX_LINKS 12
+
struct wl127x_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
+struct wl12xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl12xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl12xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+} __packed;
+
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ec37b16585df..de5b4fa5d166 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
/* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
.tx_immediate_compl = wl18xx_tx_immediate_completion,
.tx_delayed_compl = NULL,
.hw_init = wl18xx_hw_init,
+ .convert_fw_status = wl18xx_convert_fw_status,
.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
.get_pg_ver = wl18xx_get_pg_ver,
.set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
},
};
+static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl18xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl18xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
+ .num_different_channels = 2,
+ },
+ {
+ .max_interfaces = 2,
+ .limits = wl18xx_iface_ap_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
+ .num_different_channels = 1,
+ }
+};
+
static int wl18xx_setup(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
int ret;
+ BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
+
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 2;
+ wl->num_links = WL18XX_MAX_LINKS;
+ wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl18xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl18xx_fw_status);
wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 57c694396647..be1ebd55ac88 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -32,7 +32,7 @@ static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
struct ieee80211_tx_rate *rate)
{
- u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+ u8 fw_rate = wl->fw_status->counters.tx_last_rate;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
u8 i;
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 9204e07ee432..eb7cfe817010 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 5
+#define WL18XX_IFTYPE_VER 8
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 39
+#define WL18XX_MINOR_VER 13
#define WL18XX_CMD_MAX_SIZE 740
@@ -40,7 +40,10 @@
#define WL18XX_NUM_MAC_ADDRESSES 3
-#define WL18XX_RX_BA_MAX_SESSIONS 5
+#define WL18XX_RX_BA_MAX_SESSIONS 13
+
+#define WL18XX_MAX_AP_STATIONS 10
+#define WL18XX_MAX_LINKS 16
struct wl18xx_priv {
/* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
u8 padding[3];
};
+struct wl18xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ec83675a2446..b924ceadc02c 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
+ wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
+ enable_filter);
if (enable_filter &&
wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
return ret;
}
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr)
{
struct wl1271_acx_inconnection_sta *acx = NULL;
int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
return -ENOMEM;
memcpy(acx->addr, addr, ETH_ALEN);
+ acx->role_id = wlvif->role_id;
ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
acx, sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6dcfad9b0472..954d57ec98f4 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
struct acx_header header;
u8 addr[ETH_ALEN];
- u8 padding1[2];
+ u8 role_id;
+ u8 padding;
} __packed;
/*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 9b2ecf52449f..40dc30f4faaa 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
- id != CMD_STOP_FWLOGGER))
+ if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
- u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
- if (link >= WL12XX_MAX_LINKS)
+ u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
+ if (link >= wl->num_links)
return -EBUSY;
wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- /* take the last "freed packets" value from the current FW status */
- wl->links[link].prev_freed_pkts =
- wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ /*
+ * take the last "freed packets" value from the current FW status.
+ * on recovery, we might not have fw_status yet, and
+ * tx_lnk_free_pkts will be NULL. check for it.
+ */
+ if (wl->fw_status->counters.tx_lnk_free_pkts)
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status->counters.tx_lnk_free_pkts[link];
wl->links[link].wlvif = wlvif;
/*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
cmd->session_id = wl->session_ids[hlid];
+ cmd->role_id = wlvif->role_id;
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
return ret;
}
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
/* We never send a deauth, mac80211 is in charge of this */
cmd->reason_opcode = 0;
cmd->send_deauth_flag = 0;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 323d4a856e4b..b084830a61cf 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 1500
+#define WL1271_EVENT_TIMEOUT 5000
struct wl1271_cmd_header {
__le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
u8 sp_len;
u8 wmm;
u8 session_id;
+ u8 role_id;
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
u8 hlid;
u8 reason_opcode;
u8 send_deauth_flag;
- u8 padding1;
+ u8 role_id;
} __packed;
/*
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 8d3b34965db3..1f9a36031b06 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
- WL12XX_MAX_LINKS) {
+ wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
const u8 *addr;
int h;
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 51f8d634d32f..1555ff970050 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return 0;
}
+static inline void
+wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ BUG_ON(!wl->ops->convert_fw_status);
+
+ wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
+}
+
static inline u32
wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 7699f9d07e26..199e94120864 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ /* disable beacon filtering until we get the first beacon */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
+ if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_ENABLED_RATES;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 07e3d6a049ad..0305729d0986 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b46b3116cc55..ed88d3913483 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
* Start high-level PS if the STA is asleep with enough blocks in FW.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- else if (wl->active_link_count > 3 && fw_ps &&
+ else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status_2 *status)
+ struct wl_fw_status *status)
{
u32 cur_fw_ps_map;
u8 hlid;
- cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ cur_fw_ps_map = status->link_ps_bitmap;
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
wl->links[hlid].allocated_pkts);
}
-static int wlcore_fw_status(struct wl1271 *wl,
- struct wl_fw_status_1 *status_1,
- struct wl_fw_status_2 *status_2)
+static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
- size_t status_len;
int ret;
struct wl1271_link *lnk;
- status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*status_2) + wl->fw_status_priv_len;
-
- ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
+ wl->raw_fw_status,
+ wl->fw_status_len, false);
if (ret < 0)
return ret;
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status_1->intr,
- status_1->fw_rx_counter,
- status_1->drv_rx_counter,
- status_1->tx_results_counter);
+ status->intr,
+ status->fw_rx_counter,
+ status->drv_rx_counter,
+ status->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status_2->counters.tx_released_pkts[i] -
+ (status->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
- for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wl->links_map, wl->num_links) {
u8 diff;
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status_2->counters.tx_lnk_free_pkts[i] -
+ diff = (status->counters.tx_lnk_free_pkts[i] -
lnk->prev_freed_pkts) & 0xff;
if (diff == 0)
continue;
lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
/* accumulate the prev_freed_pkts counter */
lnk->total_freed_pkts += diff;
}
/* prevent wrap-around in total blocks counter */
- if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status_2->total_released_blks)))
- freed_blocks = le32_to_cpu(status_2->total_released_blks) -
+ if (likely(wl->tx_blocks_freed <= status->total_released_blks))
+ freed_blocks = status->total_released_blks -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status_2->total_released_blks);
+ status->total_released_blks;
- wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
+ wl->tx_blocks_freed = status->total_released_blks;
wl->tx_allocated_blocks -= freed_blocks;
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
+ avail = status->tx_total - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status_2);
+ wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status_2->fw_localtime);
+ (s64)(status->fw_localtime);
- wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+ wl->fw_fast_lnk_map = status->link_fast_bitmap;
return 0;
}
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr = wl->fw_status->intr;
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- ret = wlcore_rx(wl, wl->fw_status_1);
+ ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
goto out;
@@ -786,10 +782,11 @@ out:
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
-
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
+ &wl->flags));
+
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wl1271_ps_elp_wakeup(wl);
@@ -803,7 +800,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
size_t len;
/* Make sure we have enough room */
- len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
+ len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
- addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ addr = wl->fw_status->log_start_addr;
if (!addr)
goto out;
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*wl->fw_status_2) +
- wl->fw_status_priv_len, GFP_KERNEL);
- if (!wl->fw_status_1)
- return -ENOMEM;
+ wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
+ if (!wl->raw_fw_status)
+ goto err;
- wl->fw_status_2 = (struct wl_fw_status_2 *)
- (((u8 *) wl->fw_status_1) +
- WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+ wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
+ if (!wl->fw_status)
+ goto err;
wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
- if (!wl->tx_res_if) {
- kfree(wl->fw_status_1);
- return -ENOMEM;
- }
+ if (!wl->tx_res_if)
+ goto err;
return 0;
+err:
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ return -ENOMEM;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
flush_work(&wl->tx_work);
flush_delayed_work(&wl->elp_work);
+ /*
+ * Cancel the watchdog even if above tx_flush failed. We will detect
+ * it on resume anyway.
+ */
+ cancel_delayed_work(&wl->tx_watchdog_work);
+
return 0;
}
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
out:
wl->wow_enabled = false;
+
+ /*
+ * Set a flag to re-init the watchdog on the first Tx after resume.
+ * That way we avoid possible conditions where Tx-complete interrupts
+ * fail to arrive and we perform a spurious recovery.
+ */
+ set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
memset(wl->session_ids, 0, sizeof(wl->session_ids));
+ memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
wl->active_sta_count = 0;
wl->active_link_count = 0;
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status_1);
- wl->fw_status_1 = NULL;
- wl->fw_status_2 = NULL;
+ kfree(wl->raw_fw_status);
+ wl->raw_fw_status = NULL;
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
ieee80211_scan_completed(wl->hw, true);
}
- if (wl->sched_vif == wlvif) {
- ieee80211_sched_scan_stopped(wl->hw);
+ if (wl->sched_vif == wlvif)
wl->sched_vif = NULL;
- }
if (wl->roc_vif == vif) {
wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
+
+ /* disable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
}
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
key_idx);
+ /* we don't handle unsetting of default key */
+ if (key_idx == -1)
+ return;
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -3649,8 +3668,8 @@ out:
return ret;
}
-static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -3672,6 +3691,8 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
+
+ return 0;
}
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -4298,6 +4319,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
}
+ if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ if (ret < 0)
+ goto out;
+ }
+
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
@@ -4651,7 +4679,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
int ret;
- if (wl->active_sta_count >= AP_MAX_STATIONS) {
+ if (wl->active_sta_count >= wl->max_ap_stations) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
@@ -4754,7 +4782,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
return -EINVAL;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+ ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
@@ -5679,28 +5707,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
-static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
- {
- .max = 3,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static struct ieee80211_iface_combination
-wlcore_iface_combinations[] = {
- {
- .max_interfaces = 3,
- .limits = wlcore_iface_limits,
- .n_limits = ARRAY_SIZE(wlcore_iface_limits),
- },
-};
-
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
int i;
@@ -5733,7 +5739,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL;
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_CHANCTX_STA_CSA;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5828,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
- wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
- wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
- wl->hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(wlcore_iface_combinations);
+ wl->hw->wiphy->iface_combinations = wl->iface_combinations;
+ wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
SET_IEEE80211_DEV(wl->hw, wl->dev);
@@ -5844,8 +5849,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
-
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5870,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->hw = hw;
+ /*
+ * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
+ * We don't allocate any additional resources here, so that's fine.
+ */
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < WL12XX_MAX_LINKS; j++)
+ for (j = 0; j < WLCORE_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6018,8 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status_1);
+ kfree(wl->raw_fw_status);
+ kfree(wl->fw_status);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 26bfc365ba70..b52516eed7b2 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- if (test_bit(hlid, &wl->ap_ps_map))
+ if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+ return;
+
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
+ test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 6791a1a6afba..e125974285cc 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
wl->aggr_buf + pkt_offset,
pkt_len, rx_align,
&hlid) == 1) {
- if (hlid < WL12XX_MAX_LINKS)
+ if (hlid < wl->num_links)
__set_bit(hlid, active_hlids);
else
WARN(1,
- "hlid exceeded WL12XX_MAX_LINKS "
- "(%d)\n", hlid);
+ "hlid (%d) exceeded MAX_LINKS\n",
+ hlid);
}
wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
{
int ret;
- if (wl->rx_filter_enabled[index] == enable) {
+ if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
wl1271_warning("Request to enable an already "
"enabled rx filter %d", index);
return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return ret;
}
- wl->rx_filter_enabled[index] = enable;
+ if (enable)
+ __set_bit(index, wl->rx_filter_enabled);
+ else
+ __clear_bit(index, wl->rx_filter_enabled);
return 0;
}
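
The rx_filter_enabled change in this hunk swaps a bool array for a kernel bitmap, so per-filter state costs one bit instead of one byte and can be cleared wholesale with the memset() added in wlcore_op_stop_locked(). A minimal, self-contained sketch of the same pattern (the names and filter count are illustrative, not taken from wlcore):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MAX_FILTERS 5

static DECLARE_BITMAP(filter_enabled, MAX_FILTERS);

static void set_filter_state(int index, bool enable)
{
	/* !!test_bit() normalizes the bit to 0/1 for comparison with a bool */
	if (!!test_bit(index, filter_enabled) == enable)
		return;	/* nothing to do */

	if (enable)
		__set_bit(index, filter_enabled);	/* non-atomic; caller holds the lock */
	else
		__clear_bit(index, filter_enabled);
}
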
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
- if (!wl->rx_filter_enabled[i])
+ if (!test_bit(i, wl->rx_filter_enabled))
continue;
ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
if (ret)
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 3363f60fb7da..a3b1618db27c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index b2c018dccf18..dbe826dd7c23 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -211,7 +211,7 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
u32 chunk_len;
while (len > 0) {
- chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+ chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
cmd = &wl->buffer_cmd;
busy_buf = wl->buffer_busyword;
@@ -285,7 +285,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
cmd = &commands[0];
i = 0;
while (len > 0) {
- chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+ chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
*cmd = 0;
*cmd |= WSPI_CMD_WRITE;
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 8e583497940d..24dd288d6809 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -152,7 +152,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
}
/* Seeking is not supported - old logs are not kept. Disregard pos. */
- len = min(count, (size_t)wl->fwlog_size);
+ len = min_t(size_t, count, wl->fwlog_size);
wl->fwlog_size -= len;
memcpy(buffer, wl->fwlog, len);
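
The three conversions above (spi.c and sysfs.c) replace min() plus an explicit cast with min_t(), the kernel helper that casts both operands to the named type before comparing, which avoids the type-mismatch warning min() produces when a constant meets a size_t. A tiny illustrative use, with a hypothetical helper name:

#include <linux/kernel.h>

/* hypothetical helper: pick how many bytes to move this round */
static size_t pick_chunk(size_t max_chunk, size_t remaining)
{
	/* min_t() casts both operands to size_t before comparing */
	return min_t(size_t, max_chunk, remaining);
}
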
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 87cd707affa2..40b43115f835 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
/*
* ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
* into high-level PS and clean out its TX queues.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- if (wl->active_link_count > 3 && fw_ps &&
+ if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
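
As a worked example of the threshold above: each AP role always owns a global link and a broadcast link, so with one AP (wl->ap_count = 1) the first connected station brings the total to 2*1 + 1 = 3 active links, which does not exceed the threshold and so keeps the "only connected link" exception; a second station makes it 4 > 3 and PS regulation can kick in. With two APs the cut-off rises to 2*2 + 1 = 5 links.
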
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
- /* If the FW was empty before, arm the Tx watchdog */
- if (wl->tx_allocated_blocks == total_blocks)
+ /*
+ * If the FW was empty before, arm the Tx watchdog. Also do
+ * this on the first Tx after resume, as we always cancel the
+ * watchdog on suspend.
+ */
+ if (wl->tx_allocated_blocks == total_blocks ||
+ test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
wl12xx_rearm_tx_watchdog_locked(wl);
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
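
The suspend/resume changes earlier in this patch and the check above form a small handshake around the Tx watchdog: suspend cancels the delayed work, resume sets WL1271_FLAG_REINIT_TX_WDOG, and the first Tx allocation consumes the flag atomically and re-arms the watchdog. A minimal sketch of that set_bit()/test_and_clear_bit() pattern, with hypothetical names standing in for the wlcore ones:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define FLAG_REARM_ON_NEXT_TX	0	/* bit index, illustrative */

struct dev_ctx {
	unsigned long flags;
	struct delayed_work watchdog;
};

static void dev_suspend(struct dev_ctx *ctx)
{
	cancel_delayed_work(&ctx->watchdog);	/* no Tx while suspended */
}

static void dev_resume(struct dev_ctx *ctx)
{
	/* defer re-arming until traffic actually flows again */
	set_bit(FLAG_REARM_ON_NEXT_TX, &ctx->flags);
}

static void dev_tx(struct dev_ctx *ctx, bool fw_queue_was_empty)
{
	/* test_and_clear_bit() is atomic, so the flag fires exactly once */
	if (fw_queue_was_empty ||
	    test_and_clear_bit(FLAG_REARM_ON_NEXT_TX, &ctx->flags))
		schedule_delayed_work(&ctx->watchdog, 5 * HZ);
}
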
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
+ /* send EAPOL frames as voice */
+ if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+ tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
int i, h, start_hlid;
/* start from the link after the last one */
- start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < WL12XX_MAX_LINKS; i++) {
- h = (start_hlid + i) % WL12XX_MAX_LINKS;
+ for (i = 0; i < wl->num_links; i++) {
+ h = (start_hlid + i) % wl->num_links;
/* only consider connected stations */
if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
- WL12XX_MAX_LINKS;
+ wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+ wl->num_links;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
timeout = wl->conf.rx_streaming.duration;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
bool found = false;
- for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ for_each_set_bit(hlid, active_hlids, wl->num_links) {
if (test_bit(hlid, wlvif->links_map)) {
found = true;
break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
int i;
/* TX failure */
- for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wlvif->links_map, wl->num_links) {
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
/* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
/* only reset the queues if something bad happened */
if (wl1271_tx_total_queue_count(wl) != 0) {
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
out_wake:
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 35489c300da1..79cb3ff8b71f 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -37,6 +37,7 @@
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
+#define TX_HW_ATTR_EAPOL_FRAME BIT(15)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 06efc12a39e5..95a54504f0cc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -73,6 +73,8 @@ struct wlcore_ops {
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
int channel;
u8 system_hlid;
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
- u8 session_ids[WL12XX_MAX_LINKS];
+ u8 session_ids[WLCORE_MAX_LINKS];
struct list_head wlvif_list;
@@ -346,8 +348,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status_1 *fw_status_1;
- struct wl_fw_status_2 *fw_status_2;
+ void *raw_fw_status;
+ struct wl_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[WL12XX_MAX_LINKS];
+ struct wl1271_link links[WLCORE_MAX_LINKS];
/* number of currently active links */
int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+ /* Flag determining whether AP should broadcast OFDM-only rates */
+ bool ofdm_only_ap;
+
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
@@ -434,6 +439,10 @@ struct wl1271 {
u32 num_tx_desc;
/* number of RX descriptors the HW supports. */
u32 num_rx_desc;
+ /* number of links the HW supports */
+ u8 num_links;
+ /* max stations a single AP can support */
+ u8 max_ap_stations;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
+ size_t fw_status_len;
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
- bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+ unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
/* size of the private static data */
size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
struct completion nvs_loading_complete;
- /* number of concurrent channels the HW supports */
- u32 num_channels;
+ /* interface combinations supported by the hw */
+ const struct ieee80211_iface_combination *iface_combinations;
+ u8 n_iface_combinations;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index ce7261ce8b59..756e890bc5ee 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -58,10 +58,15 @@
#define WL1271_DEFAULT_DTIM_PERIOD 1
#define WL12XX_MAX_ROLES 4
-#define WL12XX_MAX_LINKS 12
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/*
+ * max number of links allowed by all HWs.
+ * this is NOT the actual max links supported by the current hw.
+ */
+#define WLCORE_MAX_LINKS 16
+
/* the driver supports the 2.4Ghz and 5Ghz bands */
#define WLCORE_NUM_BANDS 2
@@ -118,72 +123,58 @@ struct wl1271_chip {
#define NUM_TX_QUEUES 4
-#define AP_MAX_STATIONS 8
-
-struct wl_fw_packet_counters {
- /* Cumulative counter of released packets per AC */
- u8 tx_released_pkts[NUM_TX_QUEUES];
-
- /* Cumulative counter of freed packets per HLID */
- u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
- /* Cumulative counter of released Voice memory blocks */
- u8 tx_voice_released_blks;
-
- /* Tx rate of the last transmitted packet */
- u8 tx_last_rate;
-
- u8 padding[2];
-} __packed;
-
-/* FW status registers */
-struct wl_fw_status_1 {
- __le32 intr;
+struct wl_fw_status {
+ u32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
- u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[0];
-} __packed;
-
-/*
- * Each HW arch has a different number of Rx descriptors.
- * The length of the status depends on it, since it holds an array
- * of descriptors.
- */
-#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
- (sizeof(struct wl_fw_status_1) + \
- (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
- num_rx_desc)
+ __le32 *rx_pkt_descs;
-struct wl_fw_status_2 {
- __le32 fw_localtime;
+ u32 fw_localtime;
/*
* A bitmap (where each bit represents a single HLID)
* to indicate if the station is in PS mode.
*/
- __le32 link_ps_bitmap;
+ u32 link_ps_bitmap;
/*
* A bitmap (where each bit represents a single HLID) to indicate
* if the station is in Fast mode
*/
- __le32 link_fast_bitmap;
+ u32 link_fast_bitmap;
/* Cumulative counter of total released mem blocks since FW-reset */
- __le32 total_released_blks;
+ u32 total_released_blks;
/* Size (in Memory Blocks) of TX pool */
- __le32 tx_total;
+ u32 tx_total;
+
+ struct {
+ /*
+ * Cumulative counter of released packets per AC
+ * (length of the array is NUM_TX_QUEUES)
+ */
+ u8 *tx_released_pkts;
- struct wl_fw_packet_counters counters;
+ /*
+ * Cumulative counter of freed packets per HLID
+ * (length of the array is wl->num_links)
+ */
+ u8 *tx_lnk_free_pkts;
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
- __le32 log_start_addr;
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+ } counters;
+
+ u32 log_start_addr;
/* Private status to be used by the lower drivers */
- u8 priv[0];
-} __packed;
+ void *priv;
+};
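
With the packed wl_fw_status_1/_2 pair gone, wlcore now reads fw_status_len raw bytes into raw_fw_status and lets the chip-specific convert_fw_status() op (added to struct wlcore_ops earlier in this patch) translate them into the host-order struct wl_fw_status above. The raw buffer layout is chip-specific and not shown in this hunk, so the following is only a sketch under an assumed raw layout (and it assumes the wlcore headers for the wl1271 and wl_fw_status types); it illustrates the kind of le32_to_cpu() translation such a hook performs for a subset of the fields:

/* Hypothetical raw layout for one chip family -- not the real wl12xx/wl18xx one */
struct chipx_raw_fw_status {
	__le32 intr;
	u8     fw_rx_counter;
	u8     drv_rx_counter;
	u8     tx_results_counter;
	u8     reserved;
	__le32 log_start_addr;
	__le32 rx_pkt_descs[0];		/* length depends on wl->num_rx_desc */
} __packed;

static void chipx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
				    struct wl_fw_status *fw_status)
{
	struct chipx_raw_fw_status *raw = raw_fw_status;

	fw_status->intr			= le32_to_cpu(raw->intr);
	fw_status->fw_rx_counter	= raw->fw_rx_counter;
	fw_status->drv_rx_counter	= raw->drv_rx_counter;
	fw_status->tx_results_counter	= raw->tx_results_counter;
	fw_status->log_start_addr	= le32_to_cpu(raw->log_start_addr);
	/* descriptors stay little-endian; the rx path converts them on use */
	fw_status->rx_pkt_descs		= raw->rx_pkt_descs;
}
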
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
WL1271_FLAG_IO_FAILED,
+ WL1271_FLAG_REINIT_TX_WDOG,
};
enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
/* HLIDs bitmap of associated stations */
unsigned long sta_hlid_map[BITS_TO_LONGS(
- WL12XX_MAX_LINKS)];
+ WLCORE_MAX_LINKS)];
/* recoreded keys - set here before AP startup */
struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
/* counters of packets per AC, across all links in the vif */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 ssid_len;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d24d4a958c67..d5c371d77ddf 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -42,8 +42,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
@@ -1454,7 +1453,8 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
{
struct wl3501_card *this = netdev_priv(dev);
- wrqu->freq.m = ieee80211_dsss_chan_to_freq(this->chan) * 100000;
+ wrqu->freq.m = 100000 *
+ ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
wrqu->freq.e = 1;
return 0;
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index d39c4178c33a..6f5c793a7855 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -18,7 +18,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include <linux/string.h>
#include <linux/if_arp.h>
@@ -914,11 +914,8 @@ static int zd1201_set_freq(struct net_device *dev,
if (freq->e == 0)
channel = freq->m;
- else {
- channel = ieee80211_freq_to_dsss_chan(freq->m);
- if (channel < 0)
- channel = 0;
- }
+ else
+ channel = ieee80211_frequency_to_channel(freq->m);
err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
if (err)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..89d1d0556b6e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -48,37 +48,19 @@
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
struct pending_tx_info {
- struct xen_netif_tx_request req; /* coalesced tx request */
- pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
- * if it is head of one or more tx
- * reqs
- */
+ struct xen_netif_tx_request req; /* tx request */
+ /* Callback data for released SKBs. The callback is always
+ * xenvif_zerocopy_callback; desc contains the pending_idx, which is
+ * also an index into the pending_tx_info array. It is initialized in
+ * xenvif_alloc and never changes.
+ * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+ * callback_struct in this array of struct pending_tx_info's; ctx then
+ * points to the next one, or is NULL if there are no more slots for
+ * this skb. ubuf_to_vif is a helper which finds the struct xenvif
+ * from a pointer to this field.
+ */
+ struct ubuf_info callback_struct;
};
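
The comment above describes a singly linked chain threaded through the skb: destructor_arg points at the callback_struct of the first mapped slot, and each ubuf_info's ctx points at the next one. A hedged sketch of how a consumer can walk that chain to visit every pending slot of an skb; it mirrors the loop that appears later in xenvif_gop_skb(), but the function name and debug print are illustrative:

#include <linux/printk.h>
#include <linux/skbuff.h>

/* Sketch only: relies on the chaining convention set up by this patch */
static void for_each_mapped_slot(struct sk_buff *skb)
{
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

	while (ubuf) {
		u16 pending_idx = ubuf->desc;	/* index into pending_tx_info[] */

		pr_debug("skb %p uses pending slot %u\n", skb, pending_idx);
		ubuf = (struct ubuf_info *)ubuf->ctx;	/* next slot, or NULL */
	}
}
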
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -99,7 +81,7 @@ struct xenvif_rx_meta {
#define MAX_BUFFER_OFFSET PAGE_SIZE
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* It's possible for an skb to have a maximal number of frags
* but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -108,11 +90,25 @@ struct xenvif_rx_meta {
*/
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum number of slots a valid packet can use. Currently this
+ * value is defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to
+ * be supported by all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
+ /* Is this interface disabled? True when the backend discovers the
+ * frontend is rogue.
+ */
+ bool disabled;
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -126,13 +122,26 @@ struct xenvif {
pending_ring_idx_t pending_cons;
u16 pending_ring[MAX_PENDING_REQS];
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-
- /* Coalescing tx requests before copying makes number of grant
- * copy ops greater or equal to number of slots required. In
- * worst case a tx request consumes 2 gnttab_copy.
+ grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+
+ struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+ struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+ /* passed to gnttab_[un]map_refs with pages under (un)mapping */
+ struct page *pages_to_map[MAX_PENDING_REQS];
+ struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+ /* This prevents zerocopy callbacks from racing over dealloc_ring */
+ spinlock_t callback_lock;
+ /* This prevents the dealloc thread and NAPI instance from racing over
+ * response creation and pending_ring in xenvif_idx_release. In
+ * xenvif_tx_err it only protects response creation.
*/
- struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+ spinlock_t response_lock;
+ pending_ring_idx_t dealloc_prod;
+ pending_ring_idx_t dealloc_cons;
+ u16 dealloc_ring[MAX_PENDING_REQS];
+ struct task_struct *dealloc_task;
+ wait_queue_head_t dealloc_wq;
/* Use kthread for guest RX */
struct task_struct *task;
@@ -144,6 +153,9 @@ struct xenvif {
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots;
+ bool rx_queue_purge;
+
+ struct timer_list wake_queue;
/* This array is allocated separately as it is large */
struct gnttab_copy *grant_copy_op;
@@ -175,6 +187,10 @@ struct xenvif {
/* Statistics */
unsigned long rx_gso_checksum_fixup;
+ unsigned long tx_zerocopy_sent;
+ unsigned long tx_zerocopy_success;
+ unsigned long tx_zerocopy_fail;
+ unsigned long tx_frag_overflow;
/* Miscellaneous private stuff. */
struct net_device *dev;
@@ -216,9 +232,11 @@ void xenvif_carrier_off(struct xenvif *vif);
int xenvif_tx_action(struct xenvif *vif, int budget);
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif *vif);
+int xenvif_dealloc_kthread(void *data);
+
/* Determine whether the needed number of slots (req) are available,
* and set req_event if not.
*/
@@ -226,6 +244,24 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
void xenvif_stop_queue(struct xenvif *vif);
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+ return MAX_PENDING_REQS -
+ vif->pending_prod + vif->pending_cons;
+}
+
extern bool separate_tx_rx_irq;
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 301cc037fda8..ef05c5c49d41 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -38,6 +38,7 @@
#include <xen/events.h>
#include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
@@ -62,6 +63,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
struct xenvif *vif = container_of(napi, struct xenvif, napi);
int work_done;
+ /* This vif is rogue, we pretend there is nothing to do
+ * for this vif in order to deschedule it from NAPI. But this
+ * interface will be turned off in thread context later.
+ */
+ if (unlikely(vif->disabled)) {
+ napi_complete(napi);
+ return 0;
+ }
+
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
@@ -113,6 +123,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void xenvif_wake_queue(unsigned long data)
+{
+ struct xenvif *vif = (struct xenvif *)data;
+
+ if (netif_queue_stopped(vif->dev)) {
+ netdev_err(vif->dev, "draining TX queue\n");
+ vif->rx_queue_purge = true;
+ xenvif_kick_thread(vif);
+ netif_wake_queue(vif->dev);
+ }
+}
+
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -121,7 +143,9 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(skb->dev != dev);
/* Drop the packet if vif is not ready */
- if (vif->task == NULL || !xenvif_schedulable(vif))
+ if (vif->task == NULL ||
+ vif->dealloc_task == NULL ||
+ !xenvif_schedulable(vif))
goto drop;
/* At best we'll need one slot for the header and one for each
@@ -139,8 +163,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
* then turn off the queue to give the ring a chance to
* drain.
*/
- if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+ if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
+ vif->wake_queue.function = xenvif_wake_queue;
+ vif->wake_queue.data = (unsigned long)vif;
xenvif_stop_queue(vif);
+ mod_timer(&vif->wake_queue,
+ jiffies + rx_drain_timeout_jiffies);
+ }
skb_queue_tail(&vif->rx_queue, skb);
xenvif_kick_thread(vif);
@@ -233,6 +262,28 @@ static const struct xenvif_stat {
"rx_gso_checksum_fixup",
offsetof(struct xenvif, rx_gso_checksum_fixup)
},
+ /* If (sent != success + fail), there are probably packets never
+ * freed up properly!
+ */
+ {
+ "tx_zerocopy_sent",
+ offsetof(struct xenvif, tx_zerocopy_sent),
+ },
+ {
+ "tx_zerocopy_success",
+ offsetof(struct xenvif, tx_zerocopy_success),
+ },
+ {
+ "tx_zerocopy_fail",
+ offsetof(struct xenvif, tx_zerocopy_fail)
+ },
+ /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
+ * a guest with the same MAX_SKB_FRAGS.
+ */
+ {
+ "tx_frag_overflow",
+ offsetof(struct xenvif, tx_frag_overflow)
+ },
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
@@ -321,11 +372,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->ip_csum = 1;
vif->dev = dev;
+ vif->disabled = false;
+
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
init_timer(&vif->credit_timeout);
vif->credit_window_start = get_jiffies_64();
+ init_timer(&vif->wake_queue);
+
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -342,8 +397,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; i++)
vif->pending_ring[i] = i;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- vif->mmap_pages[i] = NULL;
+ spin_lock_init(&vif->callback_lock);
+ spin_lock_init(&vif->response_lock);
+ /* If ballooning is disabled, this will consume real memory, so you
+ * had better enable it. The long-term solution would be to use just a
+ * bunch of valid page descriptors, without a dependency on ballooning.
+ */
+ err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+ vif->mmap_pages,
+ false);
+ if (err) {
+ netdev_err(dev, "Could not reserve mmap_pages\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
+ { .callback = xenvif_zerocopy_callback,
+ .ctx = NULL,
+ .desc = i };
+ vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+ }
/*
* Initialise a dummy MAC address. We choose the numerically
@@ -381,12 +454,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
BUG_ON(vif->tx_irq);
BUG_ON(vif->task);
+ BUG_ON(vif->dealloc_task);
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
init_waitqueue_head(&vif->wq);
+ init_waitqueue_head(&vif->dealloc_wq);
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
@@ -420,8 +495,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- task = kthread_create(xenvif_kthread,
- (void *)vif, "%s", vif->dev->name);
+ task = kthread_create(xenvif_kthread_guest_rx,
+ (void *)vif, "%s-guest-rx", vif->dev->name);
if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(task);
@@ -430,6 +505,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
vif->task = task;
+ task = kthread_create(xenvif_dealloc_kthread,
+ (void *)vif, "%s-dealloc", vif->dev->name);
+ if (IS_ERR(task)) {
+ pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ err = PTR_ERR(task);
+ goto err_rx_unbind;
+ }
+
+ vif->dealloc_task = task;
+
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -440,6 +525,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
rtnl_unlock();
wake_up_process(vif->task);
+ wake_up_process(vif->dealloc_task);
return 0;
@@ -473,10 +559,16 @@ void xenvif_disconnect(struct xenvif *vif)
xenvif_carrier_off(vif);
if (vif->task) {
+ del_timer_sync(&vif->wake_queue);
kthread_stop(vif->task);
vif->task = NULL;
}
+ if (vif->dealloc_task) {
+ kthread_stop(vif->dealloc_task);
+ vif->dealloc_task = NULL;
+ }
+
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
@@ -492,6 +584,43 @@ void xenvif_disconnect(struct xenvif *vif)
void xenvif_free(struct xenvif *vif)
{
+ int i, unmap_timeout = 0;
+ /* Here we want to avoid timeout messages if an skb can be legitimately
+ * stuck somewhere else. Realistically this could be another vif's
+ * internal or QDisc queue. That other vif also has this
+ * rx_drain_timeout_msecs timeout, but the timer only ditches the
+ * internal queue. After that, the QDisc queue can put, in the worst case,
+ * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
+ * internal queue, so we need several rounds of such timeouts until we
+ * can be sure that no other vif still holds skbs from us. We are
+ * not sending more skbs, so newly stuck packets are not interesting
+ * for us here.
+ */
+ unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
+ DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+
+ for (i = 0; i < MAX_PENDING_REQS; ++i) {
+ if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+ unmap_timeout++;
+ schedule_timeout(msecs_to_jiffies(1000));
+ if (unmap_timeout > worst_case_skb_lifetime &&
+ net_ratelimit())
+ netdev_err(vif->dev,
+ "Page still granted! Index: %x\n",
+ i);
+ /* If there are still unmapped pages, reset the loop to
+ * start checking again. We shouldn't exit here until
+ * dealloc thread and NAPI instance release all the
+ * pages. If a kernel bug causes the skbs to stall
+ * somewhere, the interface cannot be brought down
+ * properly.
+ */
+ i = -1;
+ }
+ }
+
+ free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+
netif_napi_del(&vif->napi);
unregister_netdev(vif->dev);
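
For a feel of the numbers behind the xenvif_free() comment above: with the default rx_drain_timeout_msecs of 10000, and assuming 4 KiB pages (so XEN_NETIF_RX_RING_SIZE = 256 and MAX_SKB_FRAGS = 17) with XENVIF_QUEUE_LENGTH = 32, the expression works out to (10000/1000) * DIV_ROUND_UP(32, 256/17) = 10 * DIV_ROUND_UP(32, 15) = 10 * 3 = 30, i.e. the loop tolerates roughly 30 seconds of still-granted pages before it starts complaining.
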
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 438d0c09b7e6..3f021e054ba1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
+#include <linux/highmem.h>
#include <net/tcp.h>
@@ -54,6 +55,13 @@
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
+/* When the guest ring is filled up, the qdisc queues the packets for us, but
+ * we have to time them out, otherwise other guests' packets can get stuck there.
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
/*
* This is the maximum slots a skb can have. If a guest sends a skb
* which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
- return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status);
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
+#define callback_param(vif, pending_idx) \
+ (vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer in pending_tx_info array
+ */
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+ u16 pending_idx = ubuf->desc;
+ struct pending_tx_info *temp =
+ container_of(ubuf, struct pending_tx_info, callback_struct);
+ return container_of(temp - pending_idx,
+ struct xenvif,
+ pending_tx_info[0]);
+}
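
The pointer arithmetic in ubuf_to_vif() above works because callback_struct is embedded in pending_tx_info[pending_idx]: the first container_of() recovers &vif->pending_tx_info[pending_idx], subtracting pending_idx array elements rewinds that to &vif->pending_tx_info[0], and the second container_of() on pending_tx_info[0] then yields the enclosing struct xenvif.
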
+
/* This is a minimum size for the linear area to avoid lots of
* calls to __pskb_pull_tail() as we set up checksum offsets. The
* value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
- return MAX_PENDING_REQS -
- vif->pending_prod + vif->pending_cons;
-}
-
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
RING_IDX prod, cons;
@@ -192,8 +191,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* into multiple copies tend to give large frags their
* own buffers as before.
*/
- if ((offset + size > MAX_BUFFER_OFFSET) &&
- (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ BUG_ON(size > MAX_BUFFER_OFFSET);
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
- unsigned long offset, int *head)
+ unsigned long offset, int *head,
+ struct xenvif *foreign_vif,
+ grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+ if (foreign_vif) {
+ copy_gop->source.domid = foreign_vif->domid;
+ copy_gop->source.u.ref = foreign_gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn =
+ virt_to_mfn(page_address(page));
+ }
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
+ struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+ struct xenvif *foreign_vif = NULL;
old_meta_prod = npo->meta_prod;
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ int i = 0;
+ foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+ foreign_grefs[i++] =
+ foreign_vif->pending_tx_info[pending_idx].req.gref;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+ }
+
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ virt_to_page(data), len, offset, &head,
+ NULL,
+ 0);
data += len;
}
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ &head,
+ foreign_vif,
+ foreign_grefs[i]);
}
return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
}
}
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
int meta_slots_used;
};
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
void xenvif_kick_thread(struct xenvif *vif)
{
wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
LIST_HEAD(notify);
int ret;
unsigned long offset;
- struct skb_cb_overlay *sco;
bool need_to_notify = false;
struct netrx_pending_operations npo = {
@@ -482,6 +511,8 @@ static void xenvif_rx_action(struct xenvif *vif)
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
+ RING_IDX old_req_cons;
+ RING_IDX ring_slots_used;
int i;
/* We need a cheap worst-case estimate for the number of
@@ -493,9 +524,28 @@ static void xenvif_rx_action(struct xenvif *vif)
PAGE_SIZE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned int size;
+ unsigned int offset;
+
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
- max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+ offset = skb_shinfo(skb)->frags[i].page_offset;
+
+ /* For a worst-case estimate we need to factor in
+ * the fragment page offset as this will affect the
+ * number of times xenvif_gop_frag_copy() will
+ * call start_new_rx_buffer().
+ */
+ max_slots_needed += DIV_ROUND_UP(offset + size,
+ PAGE_SIZE);
}
+
+ /* To avoid the estimate becoming too pessimal for some
+ * frontends that limit posted rx requests, cap the estimate
+ * at MAX_SKB_FRAGS.
+ */
+ if (max_slots_needed > MAX_SKB_FRAGS)
+ max_slots_needed = MAX_SKB_FRAGS;
+
+ /* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
@@ -510,9 +560,11 @@ static void xenvif_rx_action(struct xenvif *vif)
} else
vif->rx_last_skb_slots = 0;
- sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
- BUG_ON(sco->meta_slots_used > max_slots_needed);
+ old_req_cons = vif->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
+ ring_slots_used = vif->rx.req_cons - old_req_cons;
+
+ BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
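
A quick worked case for the estimate above, assuming 4 KiB pages: a 200-byte fragment that starts at page offset 4000 straddles a page boundary and can therefore force an extra start_new_rx_buffer() call. DIV_ROUND_UP(offset + size, PAGE_SIZE) = DIV_ROUND_UP(4200, 4096) = 2 captures that, whereas the old DIV_ROUND_UP(size, PAGE_SIZE) would have predicted just 1 and could trip the BUG_ON(ring_slots_used > max_slots_needed) introduced in this hunk.
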
@@ -526,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- sco = (struct skb_cb_overlay *)skb->cb;
if ((1 << vif->meta[npo.meta_cons].gso_type) &
vif->gso_prefix_mask) {
@@ -537,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
resp->offset = vif->meta[npo.meta_cons].gso_size;
resp->id = vif->meta[npo.meta_cons].id;
- resp->status = sco->meta_slots_used;
+ resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
npo.meta_cons++;
- sco->meta_slots_used--;
+ XENVIF_RX_CB(skb)->meta_slots_used--;
}
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif,
+ XENVIF_RX_CB(skb)->meta_slots_used,
+ &npo);
- if (sco->meta_slots_used == 1)
+ if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
@@ -586,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
xenvif_add_frag_responses(vif, status,
vif->meta + npo.meta_cons + 1,
- sco->meta_slots_used);
+ XENVIF_RX_CB(skb)->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
need_to_notify |= !!ret;
- npo.meta_cons += sco->meta_slots_used;
+ npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -642,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
+ unsigned long flags;
do {
+ spin_lock_irqsave(&vif->response_lock, flags);
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&vif->response_lock, flags);
if (cons == end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -655,7 +711,8 @@ static void xenvif_tx_err(struct xenvif *vif,
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
- xenvif_carrier_off(vif);
+ vif->disabled = true;
+ xenvif_kick_thread(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
@@ -756,180 +813,168 @@ static int xenvif_count_requests(struct xenvif *vif,
return slots;
}
-static struct page *xenvif_alloc_page(struct xenvif *vif,
- u16 pending_idx)
+
+struct xenvif_tx_cb {
+ u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_gop(struct xenvif *vif,
+ u16 pending_idx,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *gop)
{
- struct page *page;
+ vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+ gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map | GNTMAP_readonly,
+ txp->gref, vif->domid);
+
+ memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ sizeof(*txp));
+}
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+ struct sk_buff *skb =
+ alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL))
return NULL;
- vif->mmap_pages[pending_idx] = page;
- return page;
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ /* Initialize it here to avoid later surprises */
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ return skb;
}
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_map_grant_ref *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- u16 pending_idx = *((u16 *)skb->data);
- u16 head_idx = 0;
- int slot, start;
- struct page *page;
- pending_ring_idx_t index, start_idx = 0;
- uint16_t dst_offset;
- unsigned int nr_slots;
- struct pending_tx_info *first = NULL;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ int start;
+ pending_ring_idx_t index;
+ unsigned int nr_slots, frag_overflow = 0;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
+ if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ shinfo->nr_frags = MAX_SKB_FRAGS;
+ }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
- /* Coalesce tx requests, at this point the packet passed in
- * should be <= 64K. Any packets larger than 64K have been
- * handled in xenvif_count_requests().
- */
- for (shinfo->nr_frags = slot = start; slot < nr_slots;
- shinfo->nr_frags++) {
- struct pending_tx_info *pending_tx_info =
- vif->pending_tx_info;
+ for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ }
- page = alloc_page(GFP_ATOMIC|__GFP_COLD);
- if (!page)
- goto err;
-
- dst_offset = 0;
- first = NULL;
- while (dst_offset < PAGE_SIZE && slot < nr_slots) {
- gop->flags = GNTCOPY_source_gref;
-
- gop->source.u.ref = txp->gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txp->offset;
-
- gop->dest.domid = DOMID_SELF;
-
- gop->dest.offset = dst_offset;
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
- if (dst_offset + txp->size > PAGE_SIZE) {
- /* This page can only merge a portion
- * of tx request. Do not increment any
- * pointer / counter here. The txp
- * will be dealt with in future
- * rounds, eventually hitting the
- * `else` branch.
- */
- gop->len = PAGE_SIZE - dst_offset;
- txp->offset += gop->len;
- txp->size -= gop->len;
- dst_offset += gop->len; /* quit loop */
- } else {
- /* This tx request can be merged in the page */
- gop->len = txp->size;
- dst_offset += gop->len;
-
- index = pending_index(vif->pending_cons++);
-
- pending_idx = vif->pending_ring[index];
-
- memcpy(&pending_tx_info[pending_idx].req, txp,
- sizeof(*txp));
-
- /* Poison these fields, corresponding
- * fields for head tx req will be set
- * to correct values after the loop.
- */
- vif->mmap_pages[pending_idx] = (void *)(~0UL);
- pending_tx_info[pending_idx].head =
- INVALID_PENDING_RING_IDX;
-
- if (!first) {
- first = &pending_tx_info[pending_idx];
- start_idx = index;
- head_idx = pending_idx;
- }
-
- txp++;
- slot++;
- }
+ if (frag_overflow) {
+ struct sk_buff *nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ return NULL;
+ }
+
+ shinfo = skb_shinfo(nskb);
+ frags = shinfo->frags;
- gop++;
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+ shinfo->nr_frags++, txp++, gop++) {
+ index = pending_index(vif->pending_cons++);
+ pending_idx = vif->pending_ring[index];
+ xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+ frag_set_pending_idx(&frags[shinfo->nr_frags],
+ pending_idx);
}
- first->req.offset = 0;
- first->req.size = dst_offset;
- first->head = start_idx;
- vif->mmap_pages[head_idx] = page;
- frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ skb_shinfo(skb)->frag_list = nskb;
}
- BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
return gop;
-err:
- /* Unwind, freeing all pages and sending error responses. */
- while (shinfo->nr_frags-- > start) {
- xenvif_idx_release(vif,
- frag_get_pending_idx(&frags[shinfo->nr_frags]),
- XEN_NETIF_RSP_ERROR);
+}
+
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+ u16 pending_idx,
+ grant_handle_t handle)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to overwrite active handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
}
- /* The head too, if necessary. */
- if (start)
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ vif->grant_tx_handle[pending_idx] = handle;
+}
- return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+ u16 pending_idx)
+{
+ if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ NETBACK_INVALID_HANDLE)) {
+ netdev_err(vif->dev,
+ "Trying to unmap invalid handle! pending_idx: %x\n",
+ pending_idx);
+ BUG();
+ }
+ vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif *vif,
struct sk_buff *skb,
- struct gnttab_copy **gopp)
+ struct gnttab_map_grant_ref **gopp)
{
- struct gnttab_copy *gop = *gopp;
- u16 pending_idx = *((u16 *)skb->data);
+ struct gnttab_map_grant_ref *gop = *gopp;
+ u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct pending_tx_info *tx_info;
int nr_frags = shinfo->nr_frags;
int i, err, start;
- u16 peek; /* peek into next tx request */
+ struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = gop->status;
if (unlikely(err))
xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_grant_handle_set(vif, pending_idx , gop->handle);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+check_frags:
for (i = start; i < nr_frags; i++) {
int j, newerr;
- pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
tx_info = &vif->pending_tx_info[pending_idx];
- head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
- do {
- newerr = (++gop)->status;
- if (newerr)
- break;
- peek = vif->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(vif, peek));
+ newerr = (++gop)->status;
if (likely(!newerr)) {
+ xenvif_grant_handle_set(vif, pending_idx , gop->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
continue;
}
@@ -939,20 +984,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
-
/* First error: invalidate header and preceding fragments. */
- pending_idx = *((u16 *)skb->data);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+ if (!first_skb)
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ else
+ pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+ xenvif_idx_unmap(vif, pending_idx);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
+ if (skb_has_frag_list(skb)) {
+ first_skb = skb;
+ skb = shinfo->frag_list;
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+ start = 0;
+
+ goto check_frags;
+ }
+
+ /* There was a mapping error in the frag_list skb. We have to unmap
+ * the first skb's frags
+ */
+ if (first_skb && err) {
+ int j;
+ shinfo = skb_shinfo(first_skb);
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+ for (j = start; j < shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+ xenvif_idx_unmap(vif, pending_idx);
+ }
+ }
+
*gopp = gop + 1;
return err;
}
@@ -962,6 +1032,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
+ u16 prev_pending_idx = INVALID_PENDING_IDX;
+
+ if (skb_shinfo(skb)->destructor_arg)
+ prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
@@ -971,6 +1045,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
pending_idx = frag_get_pending_idx(frag);
+ /* If this is not the first frag, chain it to the previous */
+ if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+ skb_shinfo(skb)->destructor_arg =
+ &callback_param(vif, pending_idx);
+ else if (likely(pending_idx != prev_pending_idx))
+ callback_param(vif, prev_pending_idx).ctx =
+ &callback_param(vif, pending_idx);
+
+ callback_param(vif, pending_idx).ctx = NULL;
+ prev_pending_idx = pending_idx;
+
txp = &vif->pending_tx_info[pending_idx].req;
page = virt_to_page(idx_to_kaddr(vif, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -978,10 +1063,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xenvif_idx_release */
+ /* Take an extra reference to offset network stack's put_page */
get_page(vif->mmap_pages[pending_idx]);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
+ /* FIXME: __skb_fill_page_desc sets this to true because page->pfmemalloc
+ * overlaps with "index", and "mapping" is not set. I think mapping
+ * should be set. If delivered to local stack, it would drop this
+ * skb in sk_filter unless the socket has the right to use it.
+ */
+ skb->pfmemalloc = false;
}
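
The chain built above links each mapped frag's ubuf_info through its ctx pointer, with skb_shinfo(skb)->destructor_arg pointing at the first entry and the last entry's ctx left NULL; xenvif_zerocopy_callback() further down consumes it. A minimal sketch of that consumption, for orientation only (walk_zerocopy_chain() and queue_for_dealloc() are hypothetical names, not netback functions):

	static void walk_zerocopy_chain(struct ubuf_info *ubuf)
	{
		while (ubuf) {
			u16 pending_idx = ubuf->desc;	/* slot recorded at map time */

			queue_for_dealloc(pending_idx);	/* hand the slot to the dealloc ring */
			ubuf = (struct ubuf_info *)ubuf->ctx;	/* next frag, or NULL */
		}
	}
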
static int xenvif_get_extras(struct xenvif *vif,
@@ -1101,16 +1191,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
- struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- (skb_queue_len(&vif->tx_queue) < budget)) {
+ while (skb_queue_len(&vif->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
- struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
@@ -1126,7 +1213,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(vif);
- continue;
+ break;
}
work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
@@ -1186,8 +1273,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
PKT_PROT_LEN : txreq.size;
- skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
@@ -1195,9 +1281,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
break;
}
- /* Packets passed to netif_rx() must have some headroom. */
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1209,31 +1292,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
}
}
- /* XXX could copy straight to head */
- page = xenvif_alloc_page(vif, pending_idx);
- if (!page) {
- kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
- break;
- }
-
- gop->source.u.ref = txreq.gref;
- gop->source.domid = vif->domid;
- gop->source.offset = txreq.offset;
-
- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
- gop->dest.domid = DOMID_SELF;
- gop->dest.offset = txreq.offset;
-
- gop->len = txreq.size;
- gop->flags = GNTCOPY_source_gref;
+ xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
gop++;
- memcpy(&vif->pending_tx_info[pending_idx].req,
- &txreq, sizeof(txreq));
- vif->pending_tx_info[pending_idx].head = index;
- *((u16 *)skb->data) = pending_idx;
+ XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
@@ -1261,17 +1324,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
vif->tx.req_cons = idx;
- if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+ if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
break;
}
- return gop - vif->tx_copy_ops;
+ return gop - vif->tx_map_ops;
}
+/* Consolidate skb with a frag_list into a brand new one with local pages on
+ * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+ unsigned int offset = skb_headlen(skb);
+ skb_frag_t frags[MAX_SKB_FRAGS];
+ int i;
+ struct ubuf_info *uarg;
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+ vif->tx_zerocopy_sent += 2;
+ vif->tx_frag_overflow++;
+
+ xenvif_fill_frags(vif, nskb);
+ /* Subtract frags size; we will correct it later */
+ skb->truesize -= skb->data_len;
+ skb->len += nskb->len;
+ skb->data_len += nskb->len;
+
+ /* create a brand new frags array and coalesce there */
+ for (i = 0; offset < skb->len; i++) {
+ struct page *page;
+ unsigned int len;
+
+ BUG_ON(i >= MAX_SKB_FRAGS);
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+ if (!page) {
+ int j;
+ skb->truesize += skb->data_len;
+ for (j = 0; j < i; j++)
+ put_page(frags[j].page.p);
+ return -ENOMEM;
+ }
+
+ if (offset + PAGE_SIZE < skb->len)
+ len = PAGE_SIZE;
+ else
+ len = skb->len - offset;
+ if (skb_copy_bits(skb, offset, page_address(page), len))
+ BUG();
+
+ offset += len;
+ frags[i].page.p = page;
+ frags[i].page_offset = 0;
+ skb_frag_size_set(&frags[i], len);
+ }
+ /* swap out with old one */
+ memcpy(skb_shinfo(skb)->frags,
+ frags,
+ i * sizeof(skb_frag_t));
+ skb_shinfo(skb)->nr_frags = i;
+ skb->truesize += i * PAGE_SIZE;
+
+ /* remove traces of mapped pages and frag_list */
+ skb_frag_list_init(skb);
+ uarg = skb_shinfo(skb)->destructor_arg;
+ uarg->callback(uarg, true);
+ skb_shinfo(skb)->destructor_arg = NULL;
+
+ skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(nskb);
+
+ return 0;
+}
static int xenvif_tx_submit(struct xenvif *vif)
{
- struct gnttab_copy *gop = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
struct sk_buff *skb;
int work_done = 0;
@@ -1280,7 +1408,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
u16 pending_idx;
unsigned data_len;
- pending_idx = *((u16 *)skb->data);
+ pending_idx = XENVIF_TX_CB(skb)->pending_idx;
txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@@ -1295,14 +1423,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
memcpy(skb->data,
(void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
data_len);
+ callback_param(vif, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
+ skb_shinfo(skb)->destructor_arg =
+ &callback_param(vif, pending_idx);
} else {
/* Schedule a response immediately. */
- xenvif_idx_release(vif, pending_idx,
- XEN_NETIF_RSP_OKAY);
+ xenvif_idx_unmap(vif, pending_idx);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1312,6 +1442,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
xenvif_fill_frags(vif, skb);
+ if (unlikely(skb_has_frag_list(skb))) {
+ if (xenvif_handle_frag_list(vif, skb)) {
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Not enough memory to consolidate frag_list!\n");
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ kfree_skb(skb);
+ continue;
+ }
+ }
+
if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1324,6 +1465,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
"Can't setup checksum in net_tx_action\n");
+ /* We have to set this flag to trigger the callback */
+ if (skb_shinfo(skb)->destructor_arg)
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
continue;
}
@@ -1349,17 +1493,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
work_done++;
+ /* Set this flag right before netif_receive_skb, otherwise
+ * someone might think this packet already left netback, and
+ * do a skb_copy_ubufs while we are still in control of the
+ * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
+ */
+ if (skb_shinfo(skb)->destructor_arg) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ vif->tx_zerocopy_sent++;
+ }
+
netif_receive_skb(skb);
}
return work_done;
}
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+ unsigned long flags;
+ pending_ring_idx_t index;
+ struct xenvif *vif = ubuf_to_vif(ubuf);
+
+ /* This is the only place where we grab this lock, to protect callbacks
+ * from each other.
+ */
+ spin_lock_irqsave(&vif->callback_lock, flags);
+ do {
+ u16 pending_idx = ubuf->desc;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ MAX_PENDING_REQS);
+ index = pending_index(vif->dealloc_prod);
+ vif->dealloc_ring[index] = pending_idx;
+ /* Sync with xenvif_tx_dealloc_action:
+ * insert idx then incr producer.
+ */
+ smp_wmb();
+ vif->dealloc_prod++;
+ } while (ubuf);
+ wake_up(&vif->dealloc_wq);
+ spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+ if (likely(zerocopy_success))
+ vif->tx_zerocopy_success++;
+ else
+ vif->tx_zerocopy_fail++;
+}
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+ struct gnttab_unmap_grant_ref *gop;
+ pending_ring_idx_t dc, dp;
+ u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+ unsigned int i = 0;
+
+ dc = vif->dealloc_cons;
+ gop = vif->tx_unmap_ops;
+
+ /* Free up any grants we have finished using */
+ do {
+ dp = vif->dealloc_prod;
+
+ /* Ensure we see all indices enqueued by all
+ * xenvif_zerocopy_callback().
+ */
+ smp_rmb();
+
+ while (dc != dp) {
+ BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ pending_idx =
+ vif->dealloc_ring[pending_index(dc++)];
+
+ pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx;
+ vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+ vif->mmap_pages[pending_idx];
+ gnttab_set_unmap_op(gop,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+ ++gop;
+ }
+
+ } while (dp != vif->dealloc_prod);
+
+ vif->dealloc_cons = dc;
+
+ if (gop - vif->tx_unmap_ops > 0) {
+ int ret;
+ ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ NULL,
+ vif->pages_to_unmap,
+ gop - vif->tx_unmap_ops);
+ if (ret) {
+ netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - vif->tx_unmap_ops, ret);
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ if (gop[i].status != GNTST_okay)
+ netdev_err(vif->dev,
+ " host_addr: %llx handle: %x status: %d\n",
+ gop[i].host_addr,
+ gop[i].handle,
+ gop[i].status);
+ }
+ BUG();
+ }
+ }
+
+ for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+ xenvif_idx_release(vif, pending_idx_release[i],
+ XEN_NETIF_RSP_OKAY);
+}
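
The smp_wmb() in xenvif_zerocopy_callback() pairs with the smp_rmb() above: the callback stores the pending index into the dealloc ring before advancing dealloc_prod, and the dealloc thread reads dealloc_prod before the slots it covers. A self-contained sketch of that pairing using the usual kernel barrier primitives (ring, prod, cons and RING_SIZE are stand-ins, not the struct xenvif fields):

	#define RING_SIZE 256				/* assumed power of two */

	static u16 ring[RING_SIZE];
	static unsigned int prod, cons;			/* free-running indices */

	static void producer_publish(u16 idx)		/* cf. xenvif_zerocopy_callback() */
	{
		ring[prod & (RING_SIZE - 1)] = idx;
		smp_wmb();		/* slot contents visible before the index moves */
		prod++;
	}

	static void consumer_drain(void (*handle)(u16))	/* cf. xenvif_tx_dealloc_action() */
	{
		unsigned int p = prod;

		smp_rmb();		/* read prod before the slots it claims to cover */
		while (cons != p)
			handle(ring[cons++ & (RING_SIZE - 1)]);
	}

With more than one producer the plain increment above would need a lock or atomics; netback serializes producers with callback_lock in the callback, which is why plain increments suffice there.
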
+
+
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
unsigned nr_gops;
- int work_done;
+ int work_done, ret;
if (unlikely(!tx_work_todo(vif)))
return 0;
@@ -1369,7 +1622,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
if (nr_gops == 0)
return 0;
- gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+ ret = gnttab_map_refs(vif->tx_map_ops,
+ NULL,
+ vif->pages_to_map,
+ nr_gops);
+ BUG_ON(ret);
work_done = xenvif_tx_submit(vif);
@@ -1380,45 +1637,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t head;
- u16 peek; /* peek into next tx request */
-
- BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
- /* Already complete? */
- if (vif->mmap_pages[pending_idx] == NULL)
- return;
+ pending_ring_idx_t index;
+ unsigned long flags;
pending_tx_info = &vif->pending_tx_info[pending_idx];
-
- head = pending_tx_info->head;
-
- BUG_ON(!pending_tx_is_head(vif, head));
- BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
- do {
- pending_ring_idx_t index;
- pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = vif->pending_ring[idx];
-
- pending_tx_info = &vif->pending_tx_info[info_idx];
- make_tx_response(vif, &pending_tx_info->req, status);
-
- /* Setting any number other than
- * INVALID_PENDING_RING_IDX indicates this slot is
- * starting a new packet / ending a previous packet.
- */
- pending_tx_info->head = 0;
-
- index = pending_index(vif->pending_prod++);
- vif->pending_ring[index] = vif->pending_ring[info_idx];
-
- peek = vif->pending_ring[pending_index(++head)];
-
- } while (!pending_tx_is_head(vif, peek));
-
- put_page(vif->mmap_pages[pending_idx]);
- vif->mmap_pages[pending_idx] = NULL;
+ spin_lock_irqsave(&vif->response_lock, flags);
+ make_tx_response(vif, &pending_tx_info->req, status);
+ index = pending_index(vif->pending_prod);
+ vif->pending_ring[index] = pending_idx;
+ /* TX shouldn't use the index before we give it back here */
+ mb();
+ vif->pending_prod++;
+ spin_unlock_irqrestore(&vif->response_lock, flags);
}
@@ -1466,23 +1696,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
return resp;
}
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+ int ret;
+ struct gnttab_unmap_grant_ref tx_unmap_op;
+
+ gnttab_set_unmap_op(&tx_unmap_op,
+ idx_to_kaddr(vif, pending_idx),
+ GNTMAP_host_map,
+ vif->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(vif, pending_idx);
+
+ ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+ &vif->mmap_pages[pending_idx], 1);
+ if (ret) {
+ netdev_err(vif->dev,
+ "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+ ret,
+ pending_idx,
+ tx_unmap_op.host_addr,
+ tx_unmap_op.handle,
+ tx_unmap_op.status);
+ BUG();
+ }
+
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+ return (!skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+ vif->rx_queue_purge;
}
static inline int tx_work_todo(struct xenvif *vif)
{
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
- (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
return 1;
return 0;
}
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+ return vif->dealloc_cons != vif->dealloc_prod;
+}
+
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
@@ -1540,7 +1801,7 @@ static void xenvif_start_queue(struct xenvif *vif)
netif_wake_queue(vif->dev);
}
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
{
struct xenvif *vif = data;
struct sk_buff *skb;
@@ -1548,16 +1809,34 @@ int xenvif_kthread(void *data)
while (!kthread_should_stop()) {
wait_event_interruptible(vif->wq,
rx_work_todo(vif) ||
+ vif->disabled ||
kthread_should_stop());
+
+ /* This frontend is found to be rogue, disable it in
+ * kthread context. Currently this is only set when
+ * netback finds out the frontend sends a malformed packet,
+ * but we cannot disable the interface in softirq
+ * context so we defer it here.
+ */
+ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
+ xenvif_carrier_off(vif);
+
if (kthread_should_stop())
break;
+ if (vif->rx_queue_purge) {
+ skb_queue_purge(&vif->rx_queue);
+ vif->rx_queue_purge = false;
+ }
+
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev))
+ netif_queue_stopped(vif->dev)) {
+ del_timer_sync(&vif->wake_queue);
xenvif_start_queue(vif);
+ }
cond_resched();
}
@@ -1569,6 +1848,28 @@ int xenvif_kthread(void *data)
return 0;
}
+int xenvif_dealloc_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->dealloc_wq,
+ tx_dealloc_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ xenvif_tx_dealloc_action(vif);
+ cond_resched();
+ }
+
+ /* Unmap anything remaining */
+ if (tx_dealloc_work_todo(vif))
+ xenvif_tx_dealloc_action(vif);
+
+ return 0;
+}
+
static int __init netback_init(void)
{
int rc = 0;
@@ -1586,6 +1887,8 @@ static int __init netback_init(void)
if (rc)
goto failed_init;
+ rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
return 0;
failed_init:
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e30d80033cbc..057b05700f8b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -658,7 +658,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1060,13 +1060,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_bh(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
rx_packets = stats->rx_packets;
tx_packets = stats->tx_packets;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1282,16 +1282,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->rx_refill_timer.function = rx_refill_timeout;
err = -ENOMEM;
- np->stats = alloc_percpu(struct netfront_stats);
+ np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- for_each_possible_cpu(i) {
- struct netfront_stats *xen_nf_stats;
- xen_nf_stats = per_cpu_ptr(np->stats, i);
- u64_stats_init(&xen_nf_stats->syncp);
- }
-
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index fe20e1cc0545..65d4ca19d132 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -26,6 +26,18 @@ config NFC_WILINK
Say Y here to compile support for Texas Instrument's NFC WiLink driver
into the kernel or say M to compile it as module.
+config NFC_TRF7970A
+ tristate "Texas Instruments TRF7970a NFC driver"
+ depends on SPI && NFC_DIGITAL
+ help
+ This option enables the NFC driver for Texas Instruments' TRF7970a
+ device. This device supports five different protocols: ISO14443A,
+ ISO14443B, FeLiCa, ISO15693 and ISO18000-3.
+
+ Say Y here to compile support for TRF7970a into the kernel or
+ say M to compile it as a module. The module will be called
+ trf7970a.ko.
+
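
For reference, one way to enable the new driver (it depends on SPI and NFC_DIGITAL, as stated above) is a .config fragment along these lines, after which the module can be loaded with modprobe trf7970a:

	CONFIG_NFC=m
	CONFIG_NFC_DIGITAL=m
	CONFIG_SPI=y
	CONFIG_NFC_TRF7970A=m
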
config NFC_MEI_PHY
tristate "MEI bus NFC device support"
depends on INTEL_MEI && NFC_HCI
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 56ab822ba03d..ae42a3fa60c9 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
obj-$(CONFIG_NFC_PORT100) += port100.o
obj-$(CONFIG_NFC_MRVL) += nfcmrvl/
+obj-$(CONFIG_NFC_TRF7970A) += trf7970a.o
ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index cf1a87bb74f8..d46a700a9637 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -55,26 +55,14 @@
NFC_PROTO_NFC_DEP_MASK)
static const struct usb_device_id pn533_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = PN533_VENDOR_ID,
- .idProduct = PN533_PRODUCT_ID,
- .driver_info = PN533_DEVICE_STD,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SCM_VENDOR_ID,
- .idProduct = SCL3711_PRODUCT_ID,
- .driver_info = PN533_DEVICE_STD,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SONY_VENDOR_ID,
- .idProduct = PASORI_PRODUCT_ID,
- .driver_info = PN533_DEVICE_PASORI,
- },
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = ACS_VENDOR_ID,
- .idProduct = ACR122U_PRODUCT_ID,
- .driver_info = PN533_DEVICE_ACR122U,
- },
+ { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_PASORI },
+ { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_ACR122U },
{ }
};
MODULE_DEVICE_TABLE(usb, pn533_table);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index d6185ff2f87b..f2acd85be86e 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -58,8 +58,19 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+/*
+ * Exposed through the 4 most significant bits
+ * of the HCI SW_VERSION first byte, a.k.a.
+ * SW RomLib.
+ */
+#define PN544_HW_VARIANT_C2 0xa
+#define PN544_HW_VARIANT_C3 0xb
+
+#define PN544_FW_CMD_RESET 0x01
#define PN544_FW_CMD_WRITE 0x08
#define PN544_FW_CMD_CHECK 0x06
+#define PN544_FW_CMD_SECURE_WRITE 0x0C
+#define PN544_FW_CMD_SECURE_CHUNK_WRITE 0x0D
struct pn544_i2c_fw_frame_write {
u8 cmd;
@@ -88,13 +99,31 @@ struct pn544_i2c_fw_blob {
u8 data[];
};
+struct pn544_i2c_fw_secure_frame {
+ u8 cmd;
+ u16 be_datalen;
+ u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_secure_blob {
+ u64 header;
+ u8 data[];
+};
+
#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND 0x13
#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR 0x19
+#define PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR 0x1D
+#define PN544_FW_CMD_RESULT_MEMORY_ERROR 0x20
+#define PN544_FW_CMD_RESULT_CHUNK_OK 0x21
#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+#define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0
+#define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
@@ -104,11 +133,17 @@ struct pn544_i2c_fw_blob {
#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
PN544_FW_WRITE_BUFFER_MAX_LEN)
+#define PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN 3
+#define PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN (PN544_FW_I2C_MAX_PAYLOAD -\
+ PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN)
+#define PN544_FW_SECURE_FRAME_HEADER_LEN 3
+#define PN544_FW_SECURE_BLOB_HEADER_LEN 8
#define FW_WORK_STATE_IDLE 1
#define FW_WORK_STATE_START 2
#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+#define FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER 5
struct pn544_i2c_phy {
struct i2c_client *i2c_dev;
@@ -119,6 +154,8 @@ struct pn544_i2c_phy {
unsigned int gpio_fw;
unsigned int en_polarity;
+ u8 hw_variant;
+
struct work_struct fw_work;
int fw_work_state;
char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
@@ -127,6 +164,8 @@ struct pn544_i2c_phy {
size_t fw_blob_size;
const u8 *fw_blob_data;
size_t fw_written;
+ size_t fw_size;
+
int fw_cmd_result;
int powered;
@@ -390,6 +429,8 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
switch (response.status) {
case 0:
return 0;
+ case PN544_FW_CMD_RESULT_CHUNK_OK:
+ return response.status;
case PN544_FW_CMD_RESULT_TIMEOUT:
return -ETIMEDOUT;
case PN544_FW_CMD_RESULT_BAD_CRC:
@@ -400,9 +441,20 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
return -EPROTO;
case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
return -EINVAL;
+ case PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND:
+ return -ENOTSUPP;
case PN544_FW_CMD_RESULT_INVALID_LENGTH:
return -EBADMSG;
+ case PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR:
+ return -ENOKEY;
+ case PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR:
+ return -EINVAL;
+ case PN544_FW_CMD_RESULT_MEMORY_ERROR:
+ return -ENOMEM;
+ case PN544_FW_CMD_RESULT_COMMAND_REJECTED:
+ return -EACCES;
case PN544_FW_CMD_RESULT_WRITE_FAILED:
+ case PN544_FW_CMD_RESULT_CHUNK_ERROR:
return -EIO;
default:
return -EIO;
@@ -469,7 +521,8 @@ static struct nfc_phy_ops i2c_phy_ops = {
.disable = pn544_hci_i2c_disable,
};
-static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name,
+ u8 hw_variant)
{
struct pn544_i2c_phy *phy = phy_id;
@@ -477,6 +530,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
strcpy(phy->firmware_name, firmware_name);
+ phy->hw_variant = hw_variant;
phy->fw_work_state = FW_WORK_STATE_START;
schedule_work(&phy->fw_work);
@@ -598,12 +652,93 @@ static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
return 0;
}
+static int pn544_hci_i2c_fw_secure_write_frame_cmd(struct pn544_i2c_phy *phy,
+ const u8 *data, u16 datalen)
+{
+ u8 buf[PN544_FW_I2C_MAX_PAYLOAD];
+ struct pn544_i2c_fw_secure_frame *chunk;
+ int chunklen;
+ int r;
+
+ if (datalen > PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN)
+ datalen = PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN;
+
+ chunk = (struct pn544_i2c_fw_secure_frame *) buf;
+
+ chunk->cmd = PN544_FW_CMD_SECURE_CHUNK_WRITE;
+
+ put_unaligned_be16(datalen, &chunk->be_datalen);
+
+ memcpy(chunk->data, data, datalen);
+
+ chunklen = sizeof(chunk->cmd) + sizeof(chunk->be_datalen) + datalen;
+
+ r = i2c_master_send(phy->i2c_dev, buf, chunklen);
+
+ if (r == chunklen)
+ return datalen;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+
+}
+
+static int pn544_hci_i2c_fw_secure_write_frame(struct pn544_i2c_phy *phy)
+{
+ struct pn544_i2c_fw_secure_frame *framep;
+ int r;
+
+ framep = (struct pn544_i2c_fw_secure_frame *) phy->fw_blob_data;
+ if (phy->fw_written == 0)
+ phy->fw_blob_size = get_unaligned_be16(&framep->be_datalen)
+ + PN544_FW_SECURE_FRAME_HEADER_LEN;
+
+ /* Only the secure write command can be chunked */
+ if (phy->fw_blob_size > PN544_FW_I2C_MAX_PAYLOAD &&
+ framep->cmd != PN544_FW_CMD_SECURE_WRITE)
+ return -EINVAL;
+
+ /* The firmware also has other commands; we just send them directly */
+ if (phy->fw_blob_size < PN544_FW_I2C_MAX_PAYLOAD) {
+ r = i2c_master_send(phy->i2c_dev,
+ (const char *) phy->fw_blob_data, phy->fw_blob_size);
+
+ if (r == phy->fw_blob_size)
+ goto exit;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+ }
+
+ r = pn544_hci_i2c_fw_secure_write_frame_cmd(phy,
+ phy->fw_blob_data + phy->fw_written,
+ phy->fw_blob_size - phy->fw_written);
+ if (r < 0)
+ return r;
+
+exit:
+ phy->fw_written += r;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER;
+
+ /* SW reset command will not trigger any response from PN544 */
+ if (framep->cmd == PN544_FW_CMD_RESET) {
+ pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+ phy->fw_cmd_result = 0;
+ schedule_work(&phy->fw_work);
+ }
+
+ return 0;
+}
+
static void pn544_hci_i2c_fw_work(struct work_struct *work)
{
struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
fw_work);
int r;
struct pn544_i2c_fw_blob *blob;
+ struct pn544_i2c_fw_secure_blob *secure_blob;
switch (phy->fw_work_state) {
case FW_WORK_STATE_START:
@@ -614,13 +749,29 @@ static void pn544_hci_i2c_fw_work(struct work_struct *work)
if (r < 0)
goto exit_state_start;
- blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
- phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
- phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
- phy->fw_blob_data = blob->data;
-
phy->fw_written = 0;
- r = pn544_hci_i2c_fw_write_chunk(phy);
+
+ switch (phy->hw_variant) {
+ case PN544_HW_VARIANT_C2:
+ blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ phy->fw_blob_dest_addr = get_unaligned_be32(
+ &blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+ break;
+ case PN544_HW_VARIANT_C3:
+ secure_blob = (struct pn544_i2c_fw_secure_blob *)
+ phy->fw->data;
+ phy->fw_blob_data = secure_blob->data;
+ phy->fw_size = phy->fw->size;
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ break;
+ default:
+ r = -ENOTSUPP;
+ break;
+ }
exit_state_start:
if (r < 0)
@@ -672,6 +823,35 @@ exit_state_wait_check_answer:
pn544_hci_i2c_fw_work_complete(phy, r);
break;
+ case FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_secure_write_answer;
+
+ if (r == PN544_FW_CMD_RESULT_CHUNK_OK) {
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ goto exit_state_wait_secure_write_answer;
+ }
+
+ if (phy->fw_written == phy->fw_blob_size) {
+ secure_blob = (struct pn544_i2c_fw_secure_blob *)
+ (phy->fw_blob_data + phy->fw_blob_size);
+ phy->fw_size -= phy->fw_blob_size +
+ PN544_FW_SECURE_BLOB_HEADER_LEN;
+ if (phy->fw_size >= PN544_FW_SECURE_BLOB_HEADER_LEN
+ + PN544_FW_SECURE_FRAME_HEADER_LEN) {
+ phy->fw_blob_data = secure_blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_secure_write_frame(phy);
+ }
+ }
+
+exit_state_wait_secure_write_answer:
+ if (r < 0 || phy->fw_size == 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
default:
break;
}
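
The C3 path above treats the firmware image as a sequence of secure blobs, each an 8-byte header followed by one frame (command byte, big-endian length, payload), and advances blob by blob as each write completes. A standalone sketch of that layout, reusing the structures and constants added in this patch (image_walk() and its callback are hypothetical, not driver code):

	static void image_walk(const u8 *img, size_t size,
			       void (*frame_cb)(const u8 *frame, size_t len))
	{
		while (size >= PN544_FW_SECURE_BLOB_HEADER_LEN +
			       PN544_FW_SECURE_FRAME_HEADER_LEN) {
			const struct pn544_i2c_fw_secure_blob *blob =
				(const struct pn544_i2c_fw_secure_blob *)img;
			const struct pn544_i2c_fw_secure_frame *f =
				(const struct pn544_i2c_fw_secure_frame *)blob->data;
			size_t flen = get_unaligned_be16(&f->be_datalen) +
				      PN544_FW_SECURE_FRAME_HEADER_LEN;

			frame_cb(blob->data, flen);	/* one frame per blob */
			img += PN544_FW_SECURE_BLOB_HEADER_LEN + flen;
			size -= PN544_FW_SECURE_BLOB_HEADER_LEN + flen;
		}
	}
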
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 3df4a109cfad..9c8051d20cea 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -786,7 +786,7 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
if (info->fw_download == NULL)
return -ENOTSUPP;
- return info->fw_download(info->phy_id, firmware_name);
+ return info->fw_download(info->phy_id, firmware_name, hdev->sw_romlib);
}
static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index 491bf45da358..2aa9233e8086 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -25,7 +25,8 @@
#define PN544_HCI_MODE 0
#define PN544_FW_MODE 1
-typedef int (*fw_download_t)(void *context, const char *firmware_name);
+typedef int (*fw_download_t)(void *context, const char *firmware_name,
+ u8 hw_variant);
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index a8555f81cbba..b7a372af5eb7 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -27,7 +27,8 @@
#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
NFC_PROTO_MIFARE_MASK | \
NFC_PROTO_FELICA_MASK | \
- NFC_PROTO_NFC_DEP_MASK)
+ NFC_PROTO_NFC_DEP_MASK | \
+ NFC_PROTO_ISO14443_MASK)
#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
NFC_DIGITAL_DRV_CAPS_TG_CRC)
@@ -139,6 +140,8 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
.in_recv_set_number = 15,
.in_recv_comm_type = PORT100_COMM_TYPE_IN_106A,
},
+ /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+ [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
};
/**
@@ -174,6 +177,9 @@ static const struct port100_tg_rf_setting tg_rf_settings[] = {
.tg_set_number = 8,
.tg_comm_type = PORT100_COMM_TYPE_TG_424F,
},
+ /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+ [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
+
};
#define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00
@@ -293,6 +299,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
{ PORT100_IN_PROT_CHECK_CRC, 0 },
{ PORT100_IN_PROT_END, 0 },
},
+ [NFC_DIGITAL_FRAMING_NFCA_T4T] = {
+ /* nfc_digital_framing_nfca_standard_with_crc_a */
+ { PORT100_IN_PROT_END, 0 },
+ },
[NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
/* nfc_digital_framing_nfca_standard */
{ PORT100_IN_PROT_END, 0 },
@@ -330,6 +340,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
{ PORT100_IN_PROT_END, 0 },
},
+ /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+ [NFC_DIGITAL_FRAMING_LAST] = {
+ { PORT100_IN_PROT_END, 0 },
+ },
};
static struct port100_protocol
@@ -371,6 +385,10 @@ tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
{ PORT100_TG_PROT_RF_OFF, 1 },
{ PORT100_TG_PROT_END, 0 },
},
+ /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+ [NFC_DIGITAL_FRAMING_LAST] = {
+ { PORT100_TG_PROT_END, 0 },
+ },
};
struct port100 {
@@ -1356,10 +1374,7 @@ static struct nfc_digital_ops port100_digital_ops = {
};
static const struct usb_device_id port100_table[] = {
- { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = SONY_VENDOR_ID,
- .idProduct = RCS380_PRODUCT_ID,
- },
+ { USB_DEVICE(SONY_VENDOR_ID, RCS380_PRODUCT_ID), },
{ }
};
MODULE_DEVICE_TABLE(usb, port100_table);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
new file mode 100644
index 000000000000..d9babe986473
--- /dev/null
+++ b/drivers/nfc/trf7970a.c
@@ -0,0 +1,1370 @@
+/*
+ * TI TRF7970a RFID/NFC Transceiver Driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Erick Macias <emacias@ti.com>
+ * Author: Felipe Balbi <balbi@ti.com>
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/nfc.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+/* There are 3 ways the host can communicate with the trf7970a:
+ * parallel mode, SPI with Slave Select (SS) mode, and SPI without
+ * SS mode. The driver only supports the two SPI modes.
+ *
+ * The trf7970a is very timing sensitive and the VIN, EN2, and EN
+ * pins must be asserted in that order and with specific delays in between.
+ * The delays used in the driver were provided by TI and have been
+ * confirmed to work with this driver.
+ *
+ * Timeouts are implemented using the delayed workqueue kernel facility.
+ * Timeouts are required so things don't hang when there is no response
+ * from the trf7970a (or tag). Using this mechanism creates a race with
+ * interrupts, however. That is, an interrupt and a timeout could occur
+ * closely enough together that one is blocked by the mutex while the other
+ * executes. When the timeout handler executes first and blocks the
+ * interrupt handler, it will eventually set the state to IDLE so the
+ * interrupt handler will check the state and exit with no harm done.
+ * When the interrupt handler executes first and blocks the timeout handler,
+ * the cancel_delayed_work() call will know that it didn't cancel the
+ * work item (i.e., timeout) and will return zero. That return code is
+ * used by the timer handler to indicate that it should ignore the timeout
+ * once it's unblocked.
+ *
+ * Aborting an active command isn't as simple as it seems because the only
+ * way to abort a command that's already been sent to the tag is to turn
+ * off power to the tag. If we do that, though, we'd have to go through
+ * the entire anticollision procedure again but the digital layer doesn't
+ * support that. So, if an abort is received before trf7970a_in_send_cmd()
+ * has sent the command to the tag, it simply returns -ECANCELED. If the
+ * command has already been sent to the tag, then the driver continues
+ * normally and receives the response data (or error) but just before
+ * sending the data upstream, it frees the rx_skb and sends -ECANCELED
+ * upstream instead. If the command failed, that error will be sent
+ * upstream.
+ *
+ * When receiving data from a tag and the interrupt status register has
+ * only the SRX bit set, it means that all of the data has been received
+ * (once what's in the fifo has been read). However, depending on timing
+ * an interrupt status with only the SRX bit set may not be received. In
+ * those cases, the timeout mechanism is used to wait 5 ms in case more
+ * data arrives. After 5 ms, it is assumed that all of the data has been
+ * received and the accumulated rx data is sent upstream. The
+ * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
+ * (i.e., it indicates that some data has been received but we're not sure
+ * if there is more coming so a timeout in this state means all data has
+ * been received and there isn't an error). The delay is 5 ms since delays
+ * over 2 ms have been observed during testing (a little extra just in case).
+ *
+ * Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
+ * Having only 4 bits in the FIFO won't normally generate an interrupt so
+ * the driver enables the '4_bit_RX' bit of the Special Functions register 1
+ * to cause an interrupt in that case. Leaving that bit for a read command
+ * messes up the data returned so it is only enabled when the framing is
+ * 'NFC_DIGITAL_FRAMING_NFCA_T2T' and the command is not a read command.
+ * Unfortunately, that means that the driver has to peek into tx frames
+ * when the framing is 'NFC_DIGITAL_FRAMING_NFCA_T2T'. This is done by
+ * the trf7970a_per_cmd_config() routine.
+ *
+ * ISO/IEC 15693 frames specify whether to use single or double sub-carrier
+ * frequencies and whether to use low or high data rates in the flags byte
+ * of the frame. This means that the driver has to peek at all 15693 frames
+ * to determine what speed to set the communication to. In addition, write
+ * and lock commands use the OPTION flag to indicate that an EOF must be
+ * sent to the tag before it will send its response. So the driver has to
+ * examine all frames for that reason too.
+ *
+ * It is unclear how long to wait before sending the EOF. According to the
+ * Note under Table 1-1 in section 1.6 of
+ * http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least
+ * 10 ms for TI Tag-it HF-I tags; however testing has shown that is not long
+ * enough. For this reason, the driver waits 20 ms which seems to work
+ * reliably.
+ */
+
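
As a concrete example of the ISO/IEC 15693 peeking described above, the flags byte selects one of the four single/double sub-carrier, low/high data-rate ISO_CTRL values defined further down in this file. The sketch below shows a plausible mapping; the driver's real logic lives in trf7970a_per_cmd_config(), which is outside this excerpt:

	static u8 iso15693_speed(u8 req_flags)
	{
		switch (req_flags & ISO15693_REQ_FLAG_SPEED_MASK) {
		case 0:					/* single sub-carrier, low rate */
			return TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
		case ISO15693_REQ_FLAG_DATA_RATE:	/* single sub-carrier, high rate */
			return TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
		case ISO15693_REQ_FLAG_SUB_CARRIER:	/* double sub-carrier, low rate */
			return TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
		default:				/* both flags: double, high rate */
			return TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
		}
	}
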
+#define TRF7970A_SUPPORTED_PROTOCOLS \
+ (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_ISO15693_MASK)
+
+/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
+ * on what the current framing is, the address of the TX length byte 1
+ * register (0x1d), and the 2 byte length of the data to be transmitted.
+ * That totals 5 bytes.
+ */
+#define TRF7970A_TX_SKB_HEADROOM 5
+
+#define TRF7970A_RX_SKB_ALLOC_SIZE 256
+
+#define TRF7970A_FIFO_SIZE 128
+
+/* TX length is 3 nibbles long ==> 4KB - 1 bytes max */
+#define TRF7970A_TX_MAX (4096 - 1)
+
+#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 5
+#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 3
+#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 20
+
+/* Quirks */
+/* Erratum: When reading IRQ Status register on trf7970a, we must issue a
+ * read continuous command for IRQ Status and Collision Position registers.
+ */
+#define TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA BIT(0)
+
+/* Direct commands */
+#define TRF7970A_CMD_IDLE 0x00
+#define TRF7970A_CMD_SOFT_INIT 0x03
+#define TRF7970A_CMD_RF_COLLISION 0x04
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_N 0x05
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_0 0x06
+#define TRF7970A_CMD_FIFO_RESET 0x0f
+#define TRF7970A_CMD_TRANSMIT_NO_CRC 0x10
+#define TRF7970A_CMD_TRANSMIT 0x11
+#define TRF7970A_CMD_DELAY_TRANSMIT_NO_CRC 0x12
+#define TRF7970A_CMD_DELAY_TRANSMIT 0x13
+#define TRF7970A_CMD_EOF 0x14
+#define TRF7970A_CMD_CLOSE_SLOT 0x15
+#define TRF7970A_CMD_BLOCK_RX 0x16
+#define TRF7970A_CMD_ENABLE_RX 0x17
+#define TRF7970A_CMD_TEST_EXT_RF 0x18
+#define TRF7970A_CMD_TEST_INT_RF 0x19
+#define TRF7970A_CMD_RX_GAIN_ADJUST 0x1a
+
+/* Bits determining whether it's a direct command or register R/W,
+ * whether to use a continuous SPI transaction or not, and the actual
+ * direct cmd opcode or register address.
+ */
+#define TRF7970A_CMD_BIT_CTRL BIT(7)
+#define TRF7970A_CMD_BIT_RW BIT(6)
+#define TRF7970A_CMD_BIT_CONTINUOUS BIT(5)
+#define TRF7970A_CMD_BIT_OPCODE(opcode) ((opcode) & 0x1f)
+
+/* Registers addresses */
+#define TRF7970A_CHIP_STATUS_CTRL 0x00
+#define TRF7970A_ISO_CTRL 0x01
+#define TRF7970A_ISO14443B_TX_OPTIONS 0x02
+#define TRF7970A_ISO14443A_HIGH_BITRATE_OPTIONS 0x03
+#define TRF7970A_TX_TIMER_SETTING_H_BYTE 0x04
+#define TRF7970A_TX_TIMER_SETTING_L_BYTE 0x05
+#define TRF7970A_TX_PULSE_LENGTH_CTRL 0x06
+#define TRF7970A_RX_NO_RESPONSE_WAIT 0x07
+#define TRF7970A_RX_WAIT_TIME 0x08
+#define TRF7970A_MODULATOR_SYS_CLK_CTRL 0x09
+#define TRF7970A_RX_SPECIAL_SETTINGS 0x0a
+#define TRF7970A_REG_IO_CTRL 0x0b
+#define TRF7970A_IRQ_STATUS 0x0c
+#define TRF7970A_COLLISION_IRQ_MASK 0x0d
+#define TRF7970A_COLLISION_POSITION 0x0e
+#define TRF7970A_RSSI_OSC_STATUS 0x0f
+#define TRF7970A_SPECIAL_FCN_REG1 0x10
+#define TRF7970A_SPECIAL_FCN_REG2 0x11
+#define TRF7970A_RAM1 0x12
+#define TRF7970A_RAM2 0x13
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS 0x14
+#define TRF7970A_NFC_LOW_FIELD_LEVEL 0x16
+#define TRF7970A_NFCID1 0x17
+#define TRF7970A_NFC_TARGET_LEVEL 0x18
+#define TRF79070A_NFC_TARGET_PROTOCOL 0x19
+#define TRF7970A_TEST_REGISTER1 0x1a
+#define TRF7970A_TEST_REGISTER2 0x1b
+#define TRF7970A_FIFO_STATUS 0x1c
+#define TRF7970A_TX_LENGTH_BYTE1 0x1d
+#define TRF7970A_TX_LENGTH_BYTE2 0x1e
+#define TRF7970A_FIFO_IO_REGISTER 0x1f
+
+/* Chip Status Control Register Bits */
+#define TRF7970A_CHIP_STATUS_VRS5_3 BIT(0)
+#define TRF7970A_CHIP_STATUS_REC_ON BIT(1)
+#define TRF7970A_CHIP_STATUS_AGC_ON BIT(2)
+#define TRF7970A_CHIP_STATUS_PM_ON BIT(3)
+#define TRF7970A_CHIP_STATUS_RF_PWR BIT(4)
+#define TRF7970A_CHIP_STATUS_RF_ON BIT(5)
+#define TRF7970A_CHIP_STATUS_DIRECT BIT(6)
+#define TRF7970A_CHIP_STATUS_STBY BIT(7)
+
+/* ISO Control Register Bits */
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_662 0x00
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_662 0x01
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648 0x02
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_2648 0x03
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a 0x04
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_667 0x05
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669 0x06
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_2669 0x07
+#define TRF7970A_ISO_CTRL_14443A_106 0x08
+#define TRF7970A_ISO_CTRL_14443A_212 0x09
+#define TRF7970A_ISO_CTRL_14443A_424 0x0a
+#define TRF7970A_ISO_CTRL_14443A_848 0x0b
+#define TRF7970A_ISO_CTRL_14443B_106 0x0c
+#define TRF7970A_ISO_CTRL_14443B_212 0x0d
+#define TRF7970A_ISO_CTRL_14443B_424 0x0e
+#define TRF7970A_ISO_CTRL_14443B_848 0x0f
+#define TRF7970A_ISO_CTRL_FELICA_212 0x1a
+#define TRF7970A_ISO_CTRL_FELICA_424 0x1b
+#define TRF7970A_ISO_CTRL_RFID BIT(5)
+#define TRF7970A_ISO_CTRL_DIR_MODE BIT(6)
+#define TRF7970A_ISO_CTRL_RX_CRC_N BIT(7) /* true == No CRC */
+
+#define TRF7970A_ISO_CTRL_RFID_SPEED_MASK 0x1f
+
+/* Modulator and SYS_CLK Control Register Bits */
+#define TRF7970A_MODULATOR_DEPTH(n) ((n) & 0x7)
+#define TRF7970A_MODULATOR_DEPTH_ASK10 (TRF7970A_MODULATOR_DEPTH(0))
+#define TRF7970A_MODULATOR_DEPTH_OOK (TRF7970A_MODULATOR_DEPTH(1))
+#define TRF7970A_MODULATOR_DEPTH_ASK7 (TRF7970A_MODULATOR_DEPTH(2))
+#define TRF7970A_MODULATOR_DEPTH_ASK8_5 (TRF7970A_MODULATOR_DEPTH(3))
+#define TRF7970A_MODULATOR_DEPTH_ASK13 (TRF7970A_MODULATOR_DEPTH(4))
+#define TRF7970A_MODULATOR_DEPTH_ASK16 (TRF7970A_MODULATOR_DEPTH(5))
+#define TRF7970A_MODULATOR_DEPTH_ASK22 (TRF7970A_MODULATOR_DEPTH(6))
+#define TRF7970A_MODULATOR_DEPTH_ASK30 (TRF7970A_MODULATOR_DEPTH(7))
+#define TRF7970A_MODULATOR_EN_ANA BIT(3)
+#define TRF7970A_MODULATOR_CLK(n) (((n) & 0x3) << 4)
+#define TRF7970A_MODULATOR_CLK_DISABLED (TRF7970A_MODULATOR_CLK(0))
+#define TRF7970A_MODULATOR_CLK_3_6 (TRF7970A_MODULATOR_CLK(1))
+#define TRF7970A_MODULATOR_CLK_6_13 (TRF7970A_MODULATOR_CLK(2))
+#define TRF7970A_MODULATOR_CLK_13_27 (TRF7970A_MODULATOR_CLK(3))
+#define TRF7970A_MODULATOR_EN_OOK BIT(6)
+#define TRF7970A_MODULATOR_27MHZ BIT(7)
+
+/* IRQ Status Register Bits */
+#define TRF7970A_IRQ_STATUS_NORESP BIT(0) /* ISO15693 only */
+#define TRF7970A_IRQ_STATUS_COL BIT(1)
+#define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR BIT(2)
+#define TRF7970A_IRQ_STATUS_PARITY_ERROR BIT(3)
+#define TRF7970A_IRQ_STATUS_CRC_ERROR BIT(4)
+#define TRF7970A_IRQ_STATUS_FIFO BIT(5)
+#define TRF7970A_IRQ_STATUS_SRX BIT(6)
+#define TRF7970A_IRQ_STATUS_TX BIT(7)
+
+#define TRF7970A_IRQ_STATUS_ERROR \
+ (TRF7970A_IRQ_STATUS_COL | \
+ TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR | \
+ TRF7970A_IRQ_STATUS_PARITY_ERROR | \
+ TRF7970A_IRQ_STATUS_CRC_ERROR)
+
+#define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0)
+#define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1)
+#define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX BIT(2)
+#define TRF7970A_SPECIAL_FCN_REG1_SP_DIR_MODE BIT(3)
+#define TRF7970A_SPECIAL_FCN_REG1_NEXT_SLOT_37US BIT(4)
+#define TRF7970A_SPECIAL_FCN_REG1_PAR43 BIT(5)
+
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_124 (0x0 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_120 (0x1 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_112 (0x2 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 (0x3 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_4 0x0
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_8 0x1
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16 0x2
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32 0x3
+
+#define TRF7970A_FIFO_STATUS_OVERFLOW BIT(7)
+
+/* NFC (ISO/IEC 14443A) Type 2 Tag commands */
+#define NFC_T2T_CMD_READ 0x30
+
+/* ISO 15693 commands codes */
+#define ISO15693_CMD_INVENTORY 0x01
+#define ISO15693_CMD_READ_SINGLE_BLOCK 0x20
+#define ISO15693_CMD_WRITE_SINGLE_BLOCK 0x21
+#define ISO15693_CMD_LOCK_BLOCK 0x22
+#define ISO15693_CMD_READ_MULTIPLE_BLOCK 0x23
+#define ISO15693_CMD_WRITE_MULTIPLE_BLOCK 0x24
+#define ISO15693_CMD_SELECT 0x25
+#define ISO15693_CMD_RESET_TO_READY 0x26
+#define ISO15693_CMD_WRITE_AFI 0x27
+#define ISO15693_CMD_LOCK_AFI 0x28
+#define ISO15693_CMD_WRITE_DSFID 0x29
+#define ISO15693_CMD_LOCK_DSFID 0x2a
+#define ISO15693_CMD_GET_SYSTEM_INFO 0x2b
+#define ISO15693_CMD_GET_MULTIPLE_BLOCK_SECURITY_STATUS 0x2c
+
+/* ISO 15693 request and response flags */
+#define ISO15693_REQ_FLAG_SUB_CARRIER BIT(0)
+#define ISO15693_REQ_FLAG_DATA_RATE BIT(1)
+#define ISO15693_REQ_FLAG_INVENTORY BIT(2)
+#define ISO15693_REQ_FLAG_PROTOCOL_EXT BIT(3)
+#define ISO15693_REQ_FLAG_SELECT BIT(4)
+#define ISO15693_REQ_FLAG_AFI BIT(4)
+#define ISO15693_REQ_FLAG_ADDRESS BIT(5)
+#define ISO15693_REQ_FLAG_NB_SLOTS BIT(5)
+#define ISO15693_REQ_FLAG_OPTION BIT(6)
+
+#define ISO15693_REQ_FLAG_SPEED_MASK \
+ (ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE)
+
+enum trf7970a_state {
+ TRF7970A_ST_OFF,
+ TRF7970A_ST_IDLE,
+ TRF7970A_ST_IDLE_RX_BLOCKED,
+ TRF7970A_ST_WAIT_FOR_TX_FIFO,
+ TRF7970A_ST_WAIT_FOR_RX_DATA,
+ TRF7970A_ST_WAIT_FOR_RX_DATA_CONT,
+ TRF7970A_ST_WAIT_TO_ISSUE_EOF,
+ TRF7970A_ST_MAX
+};
+
+struct trf7970a {
+ enum trf7970a_state state;
+ struct device *dev;
+ struct spi_device *spi;
+ struct regulator *regulator;
+ struct nfc_digital_dev *ddev;
+ u32 quirks;
+ bool powering_up;
+ bool aborting;
+ struct sk_buff *tx_skb;
+ struct sk_buff *rx_skb;
+ nfc_digital_cmd_complete_t cb;
+ void *cb_arg;
+ u8 iso_ctrl;
+ u8 special_fcn_reg1;
+ int technology;
+ int framing;
+ u8 tx_cmd;
+ bool issue_eof;
+ int en2_gpio;
+ int en_gpio;
+ struct mutex lock;
+ unsigned int timeout;
+ bool ignore_timeout;
+ struct delayed_work timeout_work;
+};
+
+
+static int trf7970a_cmd(struct trf7970a *trf, u8 opcode)
+{
+ u8 cmd = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(opcode);
+ int ret;
+
+ dev_dbg(trf->dev, "cmd: 0x%x\n", cmd);
+
+ ret = spi_write(trf->spi, &cmd, 1);
+ if (ret)
+ dev_err(trf->dev, "%s - cmd: 0x%x, ret: %d\n", __func__, cmd,
+ ret);
+ return ret;
+}
+
+static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val)
+{
+ u8 addr = TRF7970A_CMD_BIT_RW | reg;
+ int ret;
+
+ ret = spi_write_then_read(trf->spi, &addr, 1, val, 1);
+ if (ret)
+ dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+ ret);
+
+ dev_dbg(trf->dev, "read(0x%x): 0x%x\n", addr, *val);
+
+ return ret;
+}
+
+static int trf7970a_read_cont(struct trf7970a *trf, u8 reg,
+ u8 *buf, size_t len)
+{
+ u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS;
+ int ret;
+
+ dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len);
+
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, len);
+ if (ret)
+ dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+ ret);
+ return ret;
+}
+
+static int trf7970a_write(struct trf7970a *trf, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ int ret;
+
+ dev_dbg(trf->dev, "write(0x%x): 0x%x\n", reg, val);
+
+ ret = spi_write(trf->spi, buf, 2);
+ if (ret)
+ dev_err(trf->dev, "%s - write: 0x%x 0x%x, ret: %d\n", __func__,
+ buf[0], buf[1], ret);
+
+ return ret;
+}
+
+static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status)
+{
+ int ret;
+ u8 buf[2];
+ u8 addr;
+
+ addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW;
+
+ if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA) {
+ addr |= TRF7970A_CMD_BIT_CONTINUOUS;
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2);
+ } else {
+ ret = spi_write_then_read(trf->spi, &addr, 1, buf, 1);
+ }
+
+ if (ret)
+ dev_err(trf->dev, "%s - irqstatus: Status read failed: %d\n",
+ __func__, ret);
+ else
+ *status = buf[0];
+
+ return ret;
+}
+
+static void trf7970a_send_upstream(struct trf7970a *trf)
+{
+ u8 rssi;
+
+ dev_kfree_skb_any(trf->tx_skb);
+ trf->tx_skb = NULL;
+
+ if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting)
+ print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE,
+ 16, 1, trf->rx_skb->data, trf->rx_skb->len,
+ false);
+
+ /* According to the manual it is "good form" to reset the fifo and
+ * read the RSSI levels & oscillator status register here. It doesn't
+ * explain why.
+ */
+ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi);
+
+ trf->state = TRF7970A_ST_IDLE;
+
+ if (trf->aborting) {
+ dev_dbg(trf->dev, "Abort process complete\n");
+
+ if (!IS_ERR(trf->rx_skb)) {
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = ERR_PTR(-ECANCELED);
+ }
+
+ trf->aborting = false;
+ }
+
+ trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
+
+ trf->rx_skb = NULL;
+}
+
+static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
+{
+ dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno);
+
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = ERR_PTR(errno);
+
+ trf7970a_send_upstream(trf);
+}
+
+static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
+ unsigned int len)
+{
+ unsigned int timeout;
+ int ret;
+
+ print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, len, false);
+
+ ret = spi_write(trf->spi, skb->data, len);
+ if (ret) {
+ dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ skb_pull(skb, len);
+
+ if (skb->len > 0) {
+ trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO;
+ timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT;
+ } else {
+ if (trf->issue_eof) {
+ trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF;
+ timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF;
+ } else {
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+ timeout = trf->timeout;
+ }
+ }
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout,
+ trf->state);
+
+ schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout));
+
+ return 0;
+}
+
+static void trf7970a_fill_fifo(struct trf7970a *trf)
+{
+ struct sk_buff *skb = trf->tx_skb;
+ unsigned int len;
+ int ret;
+ u8 fifo_bytes;
+
+ ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+ if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+ dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+ fifo_bytes);
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ /* Calculate how much more data can be written to the fifo */
+ len = TRF7970A_FIFO_SIZE - fifo_bytes;
+ len = min(skb->len, len);
+
+ ret = trf7970a_transmit(trf, skb, len);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+}
+
+static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status)
+{
+ struct sk_buff *skb = trf->rx_skb;
+ int ret;
+ u8 fifo_bytes;
+
+ if (status & TRF7970A_IRQ_STATUS_ERROR) {
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+ if (!fifo_bytes)
+ goto no_rx_data;
+
+ if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+ dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+ fifo_bytes);
+ trf7970a_send_err_upstream(trf, -EIO);
+ return;
+ }
+
+ if (fifo_bytes > skb_tailroom(skb)) {
+ skb = skb_copy_expand(skb, skb_headroom(skb),
+ max_t(int, fifo_bytes,
+ TRF7970A_RX_SKB_ALLOC_SIZE),
+ GFP_KERNEL);
+ if (!skb) {
+ trf7970a_send_err_upstream(trf, -ENOMEM);
+ return;
+ }
+
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = skb;
+ }
+
+ ret = trf7970a_read_cont(trf, TRF7970A_FIFO_IO_REGISTER,
+ skb_put(skb, fifo_bytes), fifo_bytes);
+ if (ret) {
+ trf7970a_send_err_upstream(trf, ret);
+ return;
+ }
+
+ /* If received Type 2 ACK/NACK, shift right 4 bits and pass up */
+ if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) &&
+ (trf->special_fcn_reg1 ==
+ TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX)) {
+ skb->data[0] >>= 4;
+ status = TRF7970A_IRQ_STATUS_SRX;
+ } else {
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT;
+ }
+
+no_rx_data:
+ if (status == TRF7970A_IRQ_STATUS_SRX) { /* Receive complete */
+ trf7970a_send_upstream(trf);
+ return;
+ }
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms\n",
+ TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT);
+
+ schedule_delayed_work(&trf->timeout_work,
+ msecs_to_jiffies(TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT));
+}
+
+static irqreturn_t trf7970a_irq(int irq, void *dev_id)
+{
+ struct trf7970a *trf = dev_id;
+ int ret;
+ u8 status;
+
+ mutex_lock(&trf->lock);
+
+ if (trf->state == TRF7970A_ST_OFF) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ ret = trf7970a_read_irqstatus(trf, &status);
+ if (ret) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(trf->dev, "IRQ - state: %d, status: 0x%x\n", trf->state,
+ status);
+
+ if (!status) {
+ mutex_unlock(&trf->lock);
+ return IRQ_NONE;
+ }
+
+ switch (trf->state) {
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ /* If getting interrupts caused by RF noise, turn off the
+ * receiver to avoid unnecessary interrupts. It will be
+ * turned back on in trf7970a_in_send_cmd() when the next
+ * command is issued.
+ */
+ if (status & TRF7970A_IRQ_STATUS_ERROR) {
+ trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX);
+ trf->state = TRF7970A_ST_IDLE_RX_BLOCKED;
+ }
+
+ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ break;
+ case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+ if (status & TRF7970A_IRQ_STATUS_TX) {
+ trf->ignore_timeout =
+ !cancel_delayed_work(&trf->timeout_work);
+ trf7970a_fill_fifo(trf);
+ } else {
+ trf7970a_send_err_upstream(trf, -EIO);
+ }
+ break;
+ case TRF7970A_ST_WAIT_FOR_RX_DATA:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+ if (status & TRF7970A_IRQ_STATUS_SRX) {
+ trf->ignore_timeout =
+ !cancel_delayed_work(&trf->timeout_work);
+ trf7970a_drain_fifo(trf, status);
+ } else if (!(status & TRF7970A_IRQ_STATUS_TX)) {
+ trf7970a_send_err_upstream(trf, -EIO);
+ }
+ break;
+ case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+ if (status != TRF7970A_IRQ_STATUS_TX)
+ trf7970a_send_err_upstream(trf, -EIO);
+ break;
+ default:
+ dev_err(trf->dev, "%s - Driver in invalid state: %d\n",
+ __func__, trf->state);
+ }
+
+ mutex_unlock(&trf->lock);
+ return IRQ_HANDLED;
+}
+
+static void trf7970a_issue_eof(struct trf7970a *trf)
+{
+ int ret;
+
+ dev_dbg(trf->dev, "Issuing EOF\n");
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_EOF);
+ if (ret)
+ trf7970a_send_err_upstream(trf, ret);
+
+ trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+
+ dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n",
+ trf->timeout, trf->state);
+
+ schedule_delayed_work(&trf->timeout_work,
+ msecs_to_jiffies(trf->timeout));
+}
+
+static void trf7970a_timeout_work_handler(struct work_struct *work)
+{
+ struct trf7970a *trf = container_of(work, struct trf7970a,
+ timeout_work.work);
+
+ dev_dbg(trf->dev, "Timeout - state: %d, ignore_timeout: %d\n",
+ trf->state, trf->ignore_timeout);
+
+ mutex_lock(&trf->lock);
+
+ if (trf->ignore_timeout)
+ trf->ignore_timeout = false;
+ else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT)
+ trf7970a_send_upstream(trf); /* No more rx data so send up */
+ else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF)
+ trf7970a_issue_eof(trf);
+ else
+ trf7970a_send_err_upstream(trf, -ETIMEDOUT);
+
+ mutex_unlock(&trf->lock);
+}
+
+static int trf7970a_init(struct trf7970a *trf)
+{
+ int ret;
+
+ dev_dbg(trf->dev, "Initializing device - state: %d\n", trf->state);
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_SOFT_INIT);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_IDLE);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
+ TRF7970A_MODULATOR_DEPTH_OOK);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS,
+ TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 |
+ TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32);
+ if (ret)
+ goto err_out;
+
+ ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, 0);
+ if (ret)
+ goto err_out;
+
+ trf->special_fcn_reg1 = 0;
+
+ ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
+ TRF7970A_CHIP_STATUS_RF_ON |
+ TRF7970A_CHIP_STATUS_VRS5_3);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ dev_dbg(trf->dev, "Couldn't init device: %d\n", ret);
+ return ret;
+}
+
+static void trf7970a_switch_rf_off(struct trf7970a *trf)
+{
+ dev_dbg(trf->dev, "Switching rf off\n");
+
+ gpio_set_value(trf->en_gpio, 0);
+ gpio_set_value(trf->en2_gpio, 0);
+
+ trf->aborting = false;
+ trf->state = TRF7970A_ST_OFF;
+}
+
+static int trf7970a_switch_rf_on(struct trf7970a *trf)
+{
+ unsigned long delay;
+ int ret;
+
+ dev_dbg(trf->dev, "Switching rf on\n");
+
+ if (trf->powering_up)
+ usleep_range(5000, 6000);
+
+ gpio_set_value(trf->en2_gpio, 1);
+ usleep_range(1000, 2000);
+ gpio_set_value(trf->en_gpio, 1);
+
+ /* The delay between enabling the trf7970a and issuing the first
+ * command is significantly longer the very first time after powering
+ * up. Make sure the longer delay is only done the first time.
+ */
+ if (trf->powering_up) {
+ delay = 20000;
+ trf->powering_up = false;
+ } else {
+ delay = 5000;
+ }
+
+ usleep_range(delay, delay + 1000);
+
+ ret = trf7970a_init(trf);
+ if (ret)
+ trf7970a_switch_rf_off(trf);
+ else
+ trf->state = TRF7970A_ST_IDLE;
+
+ return ret;
+}
+
+static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ int ret = 0;
+
+ dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
+
+ mutex_lock(&trf->lock);
+
+ if (on) {
+ switch (trf->state) {
+ case TRF7970A_ST_OFF:
+ ret = trf7970a_switch_rf_on(trf);
+ break;
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ break;
+ default:
+ dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+ __func__, trf->state, on);
+ trf7970a_switch_rf_off(trf);
+ }
+ } else {
+ switch (trf->state) {
+ case TRF7970A_ST_OFF:
+ break;
+ default:
+ dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+ __func__, trf->state, on);
+ /* FALLTHROUGH */
+ case TRF7970A_ST_IDLE:
+ case TRF7970A_ST_IDLE_RX_BLOCKED:
+ trf7970a_switch_rf_off(trf);
+ }
+ }
+
+ mutex_unlock(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
+{
+ int ret = 0;
+
+ dev_dbg(trf->dev, "rf technology: %d\n", tech);
+
+ switch (tech) {
+ case NFC_DIGITAL_RF_TECH_106A:
+ trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106;
+ break;
+ case NFC_DIGITAL_RF_TECH_ISO15693:
+ trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ break;
+ default:
+ dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
+ return -EINVAL;
+ }
+
+ trf->technology = tech;
+
+ return ret;
+}
+
+static int trf7970a_config_framing(struct trf7970a *trf, int framing)
+{
+ dev_dbg(trf->dev, "framing: %d\n", framing);
+
+ switch (framing) {
+ case NFC_DIGITAL_FRAMING_NFCA_SHORT:
+ case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
+ trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
+ case NFC_DIGITAL_FRAMING_NFCA_T4T:
+ case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
+ case NFC_DIGITAL_FRAMING_ISO15693_T5T:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+ trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ case NFC_DIGITAL_FRAMING_NFCA_T2T:
+ trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+ trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+ break;
+ default:
+ dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
+ return -EINVAL;
+ }
+
+ trf->framing = framing;
+
+ return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl);
+}
+
+static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+ int param)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ int ret = 0;
+
+ dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
+
+ mutex_lock(&trf->lock);
+
+ if (trf->state == TRF7970A_ST_OFF) {
+ ret = trf7970a_switch_rf_on(trf);
+ if (ret)
+ goto err_out;
+ }
+
+ switch (type) {
+ case NFC_DIGITAL_CONFIG_RF_TECH:
+ ret = trf7970a_config_rf_tech(trf, param);
+ break;
+ case NFC_DIGITAL_CONFIG_FRAMING:
+ ret = trf7970a_config_framing(trf, param);
+ break;
+ default:
+ dev_dbg(trf->dev, "Unknown type: %d\n", type);
+ ret = -EINVAL;
+ }
+
+err_out:
+ mutex_unlock(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
+{
+ switch (cmd) {
+ case ISO15693_CMD_WRITE_SINGLE_BLOCK:
+ case ISO15693_CMD_LOCK_BLOCK:
+ case ISO15693_CMD_WRITE_MULTIPLE_BLOCK:
+ case ISO15693_CMD_WRITE_AFI:
+ case ISO15693_CMD_LOCK_AFI:
+ case ISO15693_CMD_WRITE_DSFID:
+ case ISO15693_CMD_LOCK_DSFID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
+{
+ u8 *req = skb->data;
+ u8 special_fcn_reg1, iso_ctrl;
+ int ret;
+
+ trf->issue_eof = false;
+
+ /* When issuing Type 2 read command, make sure the '4_bit_RX' bit in
+ * special functions register 1 is cleared; otherwise, it's a write or
+ * sector select command and '4_bit_RX' must be set.
+ *
+ * When issuing an ISO 15693 command, inspect the flags byte to see
+ * what speed to use. Also, remember if the OPTION flag is set on
+ * a Type 5 write or lock command so the driver will know that it
+ * has to send an EOF in order to get a response.
+ */
+ if ((trf->technology == NFC_DIGITAL_RF_TECH_106A) &&
+ (trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T)) {
+ if (req[0] == NFC_T2T_CMD_READ)
+ special_fcn_reg1 = 0;
+ else
+ special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX;
+
+ if (special_fcn_reg1 != trf->special_fcn_reg1) {
+ ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1,
+ special_fcn_reg1);
+ if (ret)
+ return ret;
+
+ trf->special_fcn_reg1 = special_fcn_reg1;
+ }
+ } else if (trf->technology == NFC_DIGITAL_RF_TECH_ISO15693) {
+ iso_ctrl = trf->iso_ctrl & ~TRF7970A_ISO_CTRL_RFID_SPEED_MASK;
+
+ switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) {
+ case 0x00:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
+ break;
+ case ISO15693_REQ_FLAG_SUB_CARRIER:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
+ break;
+ case ISO15693_REQ_FLAG_DATA_RATE:
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+ break;
+ case (ISO15693_REQ_FLAG_SUB_CARRIER |
+ ISO15693_REQ_FLAG_DATA_RATE):
+ iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
+ break;
+ }
+
+ if (iso_ctrl != trf->iso_ctrl) {
+ ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
+ if (ret)
+ return ret;
+
+ trf->iso_ctrl = iso_ctrl;
+ }
+
+ if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
+ trf7970a_is_iso15693_write_or_lock(req[1]) &&
+ (req[0] & ISO15693_REQ_FLAG_OPTION))
+ trf->issue_eof = true;
+ }
+
+ return 0;
+}
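The speed selection above keys off the sub-carrier and data-rate bits of the ISO/IEC 15693 request flags byte, matching the rates suggested by the ISO_CTRL macro names (662, 667a, 2648, 2669). A minimal standalone sketch of that decoding, with the flag values defined locally for illustration rather than taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* ISO/IEC 15693 request flag bits (local copies, for illustration only) */
#define REQ_FLAG_SUB_CARRIER	0x01	/* two sub-carriers when set */
#define REQ_FLAG_DATA_RATE	0x02	/* high data rate when set */
#define REQ_FLAG_OPTION		0x40	/* e.g. "send an EOF to get a response" */

static const char *rx_rate(uint8_t flags)
{
	switch (flags & (REQ_FLAG_SUB_CARRIER | REQ_FLAG_DATA_RATE)) {
	case 0:
		return "single sub-carrier, low rate (~6.62 kbit/s)";
	case REQ_FLAG_SUB_CARRIER:
		return "double sub-carrier, low rate (~6.67 kbit/s)";
	case REQ_FLAG_DATA_RATE:
		return "single sub-carrier, high rate (~26.48 kbit/s)";
	default:
		return "double sub-carrier, high rate (~26.69 kbit/s)";
	}
}

int main(void)
{
	uint8_t flags = REQ_FLAG_DATA_RATE | REQ_FLAG_OPTION;

	printf("%s, option flag %s\n", rx_rate(flags),
	       (flags & REQ_FLAG_OPTION) ? "set" : "clear");
	return 0;
}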
+
+static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+ char *prefix;
+ unsigned int len;
+ int ret;
+
+ dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n",
+ trf->state, timeout, skb->len);
+
+ if (skb->len > TRF7970A_TX_MAX)
+ return -EINVAL;
+
+ mutex_lock(&trf->lock);
+
+ if ((trf->state != TRF7970A_ST_IDLE) &&
+ (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) {
+ dev_err(trf->dev, "%s - Bogus state: %d\n", __func__,
+ trf->state);
+ ret = -EIO;
+ goto out_err;
+ }
+
+ if (trf->aborting) {
+ dev_dbg(trf->dev, "Abort process complete\n");
+ trf->aborting = false;
+ ret = -ECANCELED;
+ goto out_err;
+ }
+
+ trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!trf->rx_skb) {
+ dev_dbg(trf->dev, "Can't alloc rx_skb\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) {
+ ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX);
+ if (ret)
+ goto out_err;
+
+ trf->state = TRF7970A_ST_IDLE;
+ }
+
+ ret = trf7970a_per_cmd_config(trf, skb);
+ if (ret)
+ goto out_err;
+
+ trf->ddev = ddev;
+ trf->tx_skb = skb;
+ trf->cb = cb;
+ trf->cb_arg = arg;
+ trf->timeout = timeout;
+ trf->ignore_timeout = false;
+
+ len = skb->len;
+ prefix = skb_push(skb, TRF7970A_TX_SKB_HEADROOM);
+
+ /* TX data must be prefixed with a FIFO reset cmd, a transmit cmd that
+ * depends on the current framing, the address of the TX length byte 1
+ * register (0x1d), and the 2-byte length of the data to be transmitted.
+ */
+ prefix[0] = TRF7970A_CMD_BIT_CTRL |
+ TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET);
+ prefix[1] = TRF7970A_CMD_BIT_CTRL |
+ TRF7970A_CMD_BIT_OPCODE(trf->tx_cmd);
+ prefix[2] = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_TX_LENGTH_BYTE1;
+
+ if (trf->framing == NFC_DIGITAL_FRAMING_NFCA_SHORT) {
+ prefix[3] = 0x00;
+ prefix[4] = 0x0f; /* 7 bits */
+ } else {
+ prefix[3] = (len & 0xf00) >> 4;
+ prefix[3] |= ((len & 0xf0) >> 4);
+ prefix[4] = ((len & 0x0f) << 4);
+ }
+
+ len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);
+
+ usleep_range(1000, 2000);
+
+ ret = trf7970a_transmit(trf, skb, len);
+ if (ret) {
+ kfree_skb(trf->rx_skb);
+ trf->rx_skb = NULL;
+ }
+
+out_err:
+ mutex_unlock(&trf->lock);
+ return ret;
+}
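To make the length encoding above concrete: TX length byte 1 holds bits 11..4 of the byte count and the high nibble of TX length byte 2 holds bits 3..0, with the low nibble left clear because no partial byte is sent. A small standalone sketch of the same packing done by prefix[3]/prefix[4]:

#include <stdint.h>
#include <stdio.h>

/* Pack a byte count (< 4096) the way trf7970a_in_send_cmd() fills
 * prefix[3] (TX length byte 1) and prefix[4] (TX length byte 2).
 */
static void pack_tx_len(unsigned int len, uint8_t *b1, uint8_t *b2)
{
	*b1 = ((len & 0xf00) >> 4) | ((len & 0x0f0) >> 4);	/* bits 11..4 */
	*b2 = (len & 0x00f) << 4;	/* bits 3..0; low nibble: no broken byte */
}

int main(void)
{
	uint8_t b1, b2;

	pack_tx_len(0x025, &b1, &b2);	/* a 37-byte frame */
	printf("byte1 0x%02x, byte2 0x%02x\n", b1, b2);	/* 0x02 0x50 */
	return 0;
}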
+
+static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev,
+ int type, int param)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_listen(struct nfc_digital_dev *ddev,
+ u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static int trf7970a_tg_listen_mdaa(struct nfc_digital_dev *ddev,
+ struct digital_tg_mdaa_params *mdaa_params,
+ u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Unsupported interface\n");
+
+ return -EINVAL;
+}
+
+static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
+{
+ struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+ dev_dbg(trf->dev, "Abort process initiated\n");
+
+ mutex_lock(&trf->lock);
+ trf->aborting = true;
+ mutex_unlock(&trf->lock);
+}
+
+static struct nfc_digital_ops trf7970a_nfc_ops = {
+ .in_configure_hw = trf7970a_in_configure_hw,
+ .in_send_cmd = trf7970a_in_send_cmd,
+ .tg_configure_hw = trf7970a_tg_configure_hw,
+ .tg_send_cmd = trf7970a_tg_send_cmd,
+ .tg_listen = trf7970a_tg_listen,
+ .tg_listen_mdaa = trf7970a_tg_listen_mdaa,
+ .switch_rf = trf7970a_switch_rf,
+ .abort_cmd = trf7970a_abort_cmd,
+};
+
+static int trf7970a_probe(struct spi_device *spi)
+{
+ struct device_node *np = spi->dev.of_node;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct trf7970a *trf;
+ int ret;
+
+ if (!np) {
+ dev_err(&spi->dev, "No Device Tree entry\n");
+ return -EINVAL;
+ }
+
+ trf = devm_kzalloc(&spi->dev, sizeof(*trf), GFP_KERNEL);
+ if (!trf)
+ return -ENOMEM;
+
+ trf->state = TRF7970A_ST_OFF;
+ trf->dev = &spi->dev;
+ trf->spi = spi;
+ trf->quirks = id->driver_data;
+
+ spi->mode = SPI_MODE_1;
+ spi->bits_per_word = 8;
+
+ /* There are two enable pins - both must be present */
+ trf->en_gpio = of_get_named_gpio(np, "ti,enable-gpios", 0);
+ if (!gpio_is_valid(trf->en_gpio)) {
+ dev_err(trf->dev, "No EN GPIO property\n");
+ return trf->en_gpio;
+ }
+
+ ret = devm_gpio_request_one(trf->dev, trf->en_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN");
+ if (ret) {
+ dev_err(trf->dev, "Can't request EN GPIO: %d\n", ret);
+ return ret;
+ }
+
+ trf->en2_gpio = of_get_named_gpio(np, "ti,enable-gpios", 1);
+ if (!gpio_is_valid(trf->en2_gpio)) {
+ dev_err(trf->dev, "No EN2 GPIO property\n");
+ return trf->en2_gpio;
+ }
+
+ ret = devm_gpio_request_one(trf->dev, trf->en2_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN2");
+ if (ret) {
+ dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL,
+ trf7970a_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "trf7970a", trf);
+ if (ret) {
+ dev_err(trf->dev, "Can't request IRQ#%d: %d\n", spi->irq, ret);
+ return ret;
+ }
+
+ mutex_init(&trf->lock);
+ INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+
+ trf->regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->regulator)) {
+ ret = PTR_ERR(trf->regulator);
+ dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+ ret = regulator_enable(trf->regulator);
+ if (ret) {
+ dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+ goto err_destroy_lock;
+ }
+
+ trf->powering_up = true;
+
+ trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
+ TRF7970A_SUPPORTED_PROTOCOLS,
+ NFC_DIGITAL_DRV_CAPS_IN_CRC, TRF7970A_TX_SKB_HEADROOM,
+ 0);
+ if (!trf->ddev) {
+ dev_err(trf->dev, "Can't allocate NFC digital device\n");
+ ret = -ENOMEM;
+ goto err_disable_regulator;
+ }
+
+ nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+ nfc_digital_set_drvdata(trf->ddev, trf);
+ spi_set_drvdata(spi, trf);
+
+ ret = nfc_digital_register_device(trf->ddev);
+ if (ret) {
+ dev_err(trf->dev, "Can't register NFC digital device: %d\n",
+ ret);
+ goto err_free_ddev;
+ }
+
+ return 0;
+
+err_free_ddev:
+ nfc_digital_free_device(trf->ddev);
+err_disable_regulator:
+ regulator_disable(trf->regulator);
+err_destroy_lock:
+ mutex_destroy(&trf->lock);
+ return ret;
+}
+
+static int trf7970a_remove(struct spi_device *spi)
+{
+ struct trf7970a *trf = spi_get_drvdata(spi);
+
+ mutex_lock(&trf->lock);
+
+ trf7970a_switch_rf_off(trf);
+ trf7970a_init(trf);
+
+ switch (trf->state) {
+ case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA:
+ case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+ case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+ trf7970a_send_err_upstream(trf, -ECANCELED);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&trf->lock);
+
+ nfc_digital_unregister_device(trf->ddev);
+ nfc_digital_free_device(trf->ddev);
+
+ regulator_disable(trf->regulator);
+
+ mutex_destroy(&trf->lock);
+
+ return 0;
+}
+
+static const struct spi_device_id trf7970a_id_table[] = {
+ { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, trf7970a_id_table);
+
+static struct spi_driver trf7970a_spi_driver = {
+ .probe = trf7970a_probe,
+ .remove = trf7970a_remove,
+ .id_table = trf7970a_id_table,
+ .driver = {
+ .name = "trf7970a",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_spi_driver(trf7970a_spi_driver);
+
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI trf7970a RFID/NFC Transceiver Driver");
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5b3c24f3cde5..9a95831bd065 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -43,6 +43,23 @@ static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
}
}
+/* Extract the clause 22 phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB */
+static int of_get_phy_id(struct device_node *device, u32 *phy_id)
+{
+ struct property *prop;
+ const char *cp;
+ unsigned int upper, lower;
+
+ of_property_for_each_string(device, "compatible", prop, cp) {
+ if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2) {
+ *phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
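As a worked example of the parsing above, a compatible entry such as "ethernet-phy-id0141.0cc2" (a made-up value used only for illustration) yields phy_id 0x01410cc2. The same sscanf()-based extraction as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors of_get_phy_id(): "ethernet-phy-idAAAA.BBBB" -> 32-bit clause 22
 * PHY ID with AAAA in the upper 16 bits and BBBB in the lower 16 bits.
 */
static int parse_phy_id(const char *compatible, uint32_t *phy_id)
{
	unsigned int upper, lower;

	if (sscanf(compatible, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2)
		return -1;

	*phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
	return 0;
}

int main(void)
{
	uint32_t id;

	if (!parse_phy_id("ethernet-phy-id0141.0cc2", &id))
		printf("phy_id: 0x%08x\n", id);	/* prints 0x01410cc2 */
	return 0;
}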
+
static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
u32 addr)
{
@@ -50,11 +67,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
bool is_c45;
int rc;
u32 max_speed = 0;
+ u32 phy_id;
is_c45 = of_device_is_compatible(child,
"ethernet-phy-ieee802.3-c45");
- phy = get_phy_device(mdio, addr, is_c45);
+ if (!is_c45 && !of_get_phy_id(child, &phy_id))
+ phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
+ else
+ phy = get_phy_device(mdio, addr, is_c45);
if (!phy || IS_ERR(phy))
return 1;
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index a3df3428dac6..73e14184aafe 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -12,28 +12,6 @@
#include <linux/export.h>
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode' or 'phy-connection-type',
- * so that Ethernet device driver can get phy interface from device tree.
- */
-static const char *phy_modes[] = {
- [PHY_INTERFACE_MODE_NA] = "",
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_REVMII] = "rev-mii",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_SMII] = "smii",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-};
-
-/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
@@ -52,8 +30,8 @@ int of_get_phy_mode(struct device_node *np)
if (err < 0)
return err;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i]))
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5a7910e61e17..6963bdf54175 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -7,6 +7,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
select PPS
+ select NET_PTP_CLASSIFY
help
The IEEE 1588 standard defines a method to precisely
synchronize distributed clocks over Ethernet networks. The
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 34a0c607318e..419056d7887e 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -25,6 +25,96 @@
#include "ptp_private.h"
+static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_request rq;
+ int err = 0;
+
+ memset(&rq, 0, sizeof(rq));
+
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ rq.type = PTP_CLK_REQ_EXTTS;
+ rq.extts.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PEROUT:
+ rq.type = PTP_CLK_REQ_PEROUT;
+ rq.perout.index = chan;
+ err = ops->enable(ops, &rq, 0);
+ break;
+ case PTP_PF_PHYSYNC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_clock_info *info = ptp->info;
+ struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
+ unsigned int i;
+
+ /* Check to see if any other pin previously had this function. */
+ for (i = 0; i < info->n_pins; i++) {
+ if (info->pin_config[i].func == func &&
+ info->pin_config[i].chan == chan) {
+ pin1 = &info->pin_config[i];
+ break;
+ }
+ }
+ if (pin1 && i == pin)
+ return 0;
+
+ /* Check the desired function and channel. */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (chan >= info->n_ext_ts)
+ return -EINVAL;
+ break;
+ case PTP_PF_PEROUT:
+ if (chan >= info->n_per_out)
+ return -EINVAL;
+ break;
+ case PTP_PF_PHYSYNC:
+ pr_err("sorry, cannot reassign the calibration pin\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ if (pin2->func == PTP_PF_PHYSYNC) {
+ pr_err("sorry, cannot reprogram the calibration pin\n");
+ return -EINVAL;
+ }
+
+ if (info->verify(info, pin, func, chan)) {
+ pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ return -EOPNOTSUPP;
+ }
+
+ /* Disable whatever function was previously assigned. */
+ if (pin1) {
+ ptp_disable_pinfunc(info, func, chan);
+ pin1->func = PTP_PF_NONE;
+ pin1->chan = 0;
+ }
+ ptp_disable_pinfunc(info, pin2->func, pin2->chan);
+ pin2->func = func;
+ pin2->chan = chan;
+
+ return 0;
+}
+
int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
return 0;
@@ -35,12 +125,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
struct ptp_clock_caps caps;
struct ptp_clock_request req;
struct ptp_sys_offset *sysoff = NULL;
+ struct ptp_pin_desc pd;
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_time *pct;
struct timespec ts;
int enable, err = 0;
- unsigned int i;
+ unsigned int i, pin_index;
switch (cmd) {
@@ -51,6 +142,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.n_ext_ts = ptp->info->n_ext_ts;
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
+ caps.n_pins = ptp->info->n_pins;
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
@@ -126,6 +218,40 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
+ case PTP_PIN_GETFUNC:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ pd = ops->pin_config[pin_index];
+ mutex_unlock(&ptp->pincfg_mux);
+ if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
+ err = -EFAULT;
+ break;
+
+ case PTP_PIN_SETFUNC:
+ if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+ err = -EFAULT;
+ break;
+ }
+ pin_index = pd.index;
+ if (pin_index >= ops->n_pins) {
+ err = -EINVAL;
+ break;
+ }
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ break;
+
default:
err = -ENOTTY;
break;
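For context, the two new ioctls are driven from userspace through struct ptp_pin_desc from the UAPI <linux/ptp_clock.h> added in this series. A rough sketch, assuming a chardev at /dev/ptp0 and pin index 0 (both illustrative), that reads a pin's assignment and then reprograms it:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc pd = { .index = 0 };	/* pin 0: illustrative */
	int fd = open("/dev/ptp0", O_RDWR);		/* device path: illustrative */

	if (fd < 0)
		return 1;

	/* Read back the pin's current function/channel assignment. */
	if (ioctl(fd, PTP_PIN_GETFUNC, &pd) == 0)
		printf("pin %s: func %u chan %u\n", pd.name, pd.func, pd.chan);

	/* Reassign the pin to external timestamp channel 1. */
	pd.func = PTP_PF_EXTTS;
	pd.chan = 1;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pd))
		perror("PTP_PIN_SETFUNC");

	close(fd);
	return 0;
}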
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index a8319b266643..e25d2bc898e5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -169,6 +169,7 @@ static void delete_ptp_clock(struct posix_clock *pc)
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -203,6 +204,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->index = index;
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
+ mutex_init(&ptp->pincfg_mux);
init_waitqueue_head(&ptp->tsev_wq);
/* Create a new device in our class. */
@@ -249,6 +251,7 @@ no_sysfs:
device_destroy(ptp_class, ptp->devid);
no_device:
mutex_destroy(&ptp->tsevq_mux);
+ mutex_destroy(&ptp->pincfg_mux);
no_slot:
kfree(ptp);
no_memory:
@@ -305,6 +308,26 @@ int ptp_clock_index(struct ptp_clock *ptp)
}
EXPORT_SYMBOL(ptp_clock_index);
+int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ptp_pin_desc *pin = NULL;
+ int i;
+
+ mutex_lock(&ptp->pincfg_mux);
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (ptp->info->pin_config[i].func == func &&
+ ptp->info->pin_config[i].chan == chan) {
+ pin = &ptp->info->pin_config[i];
+ break;
+ }
+ }
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return pin ? i : -1;
+}
+EXPORT_SYMBOL(ptp_find_pin);
+
/* module operations */
static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 4a08727fcaf3..604d340f2095 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -244,6 +244,7 @@ static struct ptp_clock_info ptp_ixp_caps = {
.name = "IXP46X timer",
.max_adj = 66666655,
.n_ext_ts = N_EXT_TS,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_ixp_adjfreq,
.adjtime = ptp_ixp_adjtime,
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 71a2559278d7..90a106308c4f 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -514,6 +514,7 @@ static struct ptp_clock_info ptp_pch_caps = {
.name = "PCH timer",
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
+ .n_pins = 0,
.pps = 0,
.adjfreq = ptp_pch_adjfreq,
.adjtime = ptp_pch_adjtime,
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index df03f2e30ad9..9c5d41421b65 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -48,8 +48,12 @@ struct ptp_clock {
long dialed_frequency; /* remembers the frequency adjustment */
struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
struct mutex tsevq_mux; /* one process at a time reading the fifo */
+ struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
wait_queue_head_t tsev_wq;
int defunct; /* tells readers to go away when clock is being removed */
+ struct device_attribute *pin_dev_attr;
+ struct attribute **pin_attr;
+ struct attribute_group pin_attr_group;
};
/*
@@ -69,6 +73,10 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
* see ptp_chardev.c
*/
+/* caller must hold pincfg_mux */
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+
long ptp_ioctl(struct posix_clock *pc,
unsigned int cmd, unsigned long arg);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 13ec5311746a..302e626fe6b0 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/capability.h>
+#include <linux/slab.h>
#include "ptp_private.h"
@@ -42,6 +43,7 @@ PTP_SHOW_INT(max_adjustment, max_adj);
PTP_SHOW_INT(n_alarms, n_alarm);
PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+PTP_SHOW_INT(n_programmable_pins, n_pins);
PTP_SHOW_INT(pps_available, pps);
static struct attribute *ptp_attrs[] = {
@@ -50,6 +52,7 @@ static struct attribute *ptp_attrs[] = {
&dev_attr_n_alarms.attr,
&dev_attr_n_external_timestamps.attr,
&dev_attr_n_periodic_outputs.attr,
+ &dev_attr_n_programmable_pins.attr,
&dev_attr_pps_available.attr,
NULL,
};
@@ -175,6 +178,63 @@ out:
return err;
}
+static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
+{
+ int i;
+ for (i = 0; i < ptp->info->n_pins; i++) {
+ if (!strcmp(ptp->info->pin_config[i].name, name))
+ return i;
+ }
+ return -1;
+}
+
+static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int index;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+
+ func = ptp->info->pin_config[index].func;
+ chan = ptp->info->pin_config[index].chan;
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+}
+
+static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_clock *ptp = dev_get_drvdata(dev);
+ unsigned int func, chan;
+ int cnt, err, index;
+
+ cnt = sscanf(buf, "%u %u", &func, &chan);
+ if (cnt != 2)
+ return -EINVAL;
+
+ index = ptp_pin_name2index(ptp, attr->attr.name);
+ if (index < 0)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
+ err = ptp_set_pinfunc(ptp, index, func, chan);
+ mutex_unlock(&ptp->pincfg_mux);
+ if (err)
+ return err;
+
+ return count;
+}
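The sysfs side exposes one file per pin in the "pins" group; a read returns "<func> <chan>" and a write of the same two integers goes through ptp_set_pinfunc(). A small sketch driving it from userspace, where the ptp0 instance and the SDP0 pin name are placeholders for whatever the driver actually registers:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/ptp/ptp0/pins/SDP0";	/* placeholder pin name */
	unsigned int func, chan;
	FILE *f;

	/* Read the current "<func> <chan>" assignment. */
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u %u", &func, &chan) == 2)
			printf("current: func %u chan %u\n", func, chan);
		fclose(f);
	}

	/* Request func 1 (PTP_PF_EXTTS in the enum ordering above), chan 0. */
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "1 0\n");
		fclose(f);
	}
	return 0;
}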
+
static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
static DEVICE_ATTR(period, 0220, NULL, period_store);
@@ -195,9 +255,56 @@ int ptp_cleanup_sysfs(struct ptp_clock *ptp)
if (info->pps)
device_remove_file(dev, &dev_attr_pps_enable);
+ if (info->n_pins) {
+ sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
+ kfree(ptp->pin_attr);
+ kfree(ptp->pin_dev_attr);
+ }
return 0;
}
+static int ptp_populate_pins(struct ptp_clock *ptp)
+{
+ struct device *dev = ptp->dev;
+ struct ptp_clock_info *info = ptp->info;
+ int err = -ENOMEM, i, n_pins = info->n_pins;
+
+ ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+ GFP_KERNEL);
+ if (!ptp->pin_dev_attr)
+ goto no_dev_attr;
+
+ ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!ptp->pin_attr)
+ goto no_pin_attr;
+
+ for (i = 0; i < n_pins; i++) {
+ struct device_attribute *da = &ptp->pin_dev_attr[i];
+ sysfs_attr_init(&da->attr);
+ da->attr.name = info->pin_config[i].name;
+ da->attr.mode = 0644;
+ da->show = ptp_pin_show;
+ da->store = ptp_pin_store;
+ ptp->pin_attr[i] = &da->attr;
+ }
+
+ ptp->pin_attr_group.name = "pins";
+ ptp->pin_attr_group.attrs = ptp->pin_attr;
+
+ err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
+ if (err)
+ goto no_group;
+ return 0;
+
+no_group:
+ kfree(ptp->pin_attr);
+no_pin_attr:
+ kfree(ptp->pin_dev_attr);
+no_dev_attr:
+ return err;
+}
+
int ptp_populate_sysfs(struct ptp_clock *ptp)
{
struct device *dev = ptp->dev;
@@ -222,7 +329,15 @@ int ptp_populate_sysfs(struct ptp_clock *ptp)
if (err)
goto out4;
}
+ if (info->n_pins) {
+ err = ptp_populate_pins(ptp);
+ if (err)
+ goto out5;
+ }
return 0;
+out5:
+ if (info->pps)
+ device_remove_file(dev, &dev_attr_pps_enable);
out4:
if (info->n_per_out)
device_remove_file(dev, &dev_attr_period);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a0de045eb227..5333b2c018e7 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -854,8 +854,11 @@ static inline int qeth_get_micros(void)
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
- struct ethhdr *ehdr = (struct ethhdr *)skb->data;
- switch (ehdr->h_proto) {
+ __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+
+ if (*p == ETH_P_8021Q)
+ p += 2;
+ switch (*p) {
case ETH_P_IPV6:
return 6;
case ETH_P_IP:
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a0aff2eb247c..22470a3b182f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4610,8 +4610,8 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
-int qeth_query_card_info_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+static int qeth_query_card_info_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
struct qeth_query_card_info *card_info;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 908d82529ee9..8dea3f12ccc1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -241,7 +241,7 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card,
}
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
- struct sk_buff *skb, int ipv, int cast_type)
+ struct sk_buff *skb, int cast_type)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
@@ -762,7 +762,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
elements_needed++;
skb_reset_mac_header(new_skb);
- qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l2_fill_header(card, hdr, new_skb, cast_type);
hdr->hdr.l2.pkt_length = new_skb->len;
memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
skb_mac_header(new_skb), ETH_HLEN);
@@ -775,7 +775,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr));
skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
- qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+ qeth_l2_fill_header(card, hdr, new_skb, cast_type);
}
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 8ca55c4e9db2..4e54d8540219 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -554,7 +554,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -565,7 +565,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
priv->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return 0;
}
@@ -682,7 +682,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->grp);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
- dev_kfree_skb(skb);
+ dev_consume_skb_any(skb);
return 0;
}
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
index d387f13ea7dc..0cc32c60ddee 100644
--- a/drivers/staging/rtl8821ae/rc.c
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -286,7 +286,6 @@ static void rtl_rate_free_sta(void *rtlpriv,
}
static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/staging/rtl8821ae/rtl8821ae/trx.c b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
index dd0f6dcb3085..f40a93c0d2bb 100644
--- a/drivers/staging/rtl8821ae/rtl8821ae/trx.c
+++ b/drivers/staging/rtl8821ae/rtl8821ae/trx.c
@@ -616,7 +616,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 77e4be21e44b..a10d014365f2 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -695,7 +695,7 @@ int wl_send(struct wl_private *lp)
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
- dev_kfree_skb(lp->txF.skb);
+ dev_consume_skb_any( lp->txF.skb );
lp->txF.skb = NULL;
lp->txF.port = 0;
@@ -1750,7 +1750,7 @@ int wl_send_dma(struct wl_private *lp, struct sk_buff *skb, int port)
WL_WDS_NETIF_STOP_QUEUE(lp);
lp->netif_queue_on = FALSE;
- dev_kfree_skb(skb);
+ dev_kfree_skb_any( skb );
return 0;
}
}
@@ -1775,7 +1775,7 @@ int wl_send_dma(struct wl_private *lp, struct sk_buff *skb, int port)
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
- dev_kfree_skb(skb);
+ dev_consume_skb_any( skb );
return TRUE;
} /* wl_send_dma */