-rw-r--r--Documentation/filesystems/nfs/nfsroot.txt2
-rw-r--r--Documentation/networking/README.ipw22002
-rw-r--r--Documentation/networking/bonding.txt84
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rw-r--r--Documentation/networking/packet_mmap.txt26
-rw-r--r--Documentation/networking/pktgen.txt5
-rw-r--r--arch/microblaze/include/asm/system.h3
-rw-r--r--arch/powerpc/include/asm/system.h3
-rw-r--r--drivers/atm/Kconfig2
-rw-r--r--drivers/atm/he.c304
-rw-r--r--drivers/atm/he.h65
-rw-r--r--drivers/atm/nicstar.c5197
-rw-r--r--drivers/atm/nicstar.h602
-rw-r--r--drivers/atm/nicstarmac.c364
-rw-r--r--drivers/infiniband/core/addr.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c11
-rw-r--r--drivers/isdn/pcbit/drv.c10
-rw-r--r--drivers/isdn/sc/ioctl.c23
-rw-r--r--drivers/net/3c527.h6
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/arcnet/capmode.c177
-rw-r--r--drivers/net/arm/ixp4xx_eth.c11
-rw-r--r--drivers/net/atl1c/atl1c.h9
-rw-r--r--drivers/net/atl1c/atl1c_hw.c107
-rw-r--r--drivers/net/atl1c/atl1c_hw.h49
-rw-r--r--drivers/net/atl1c/atl1c_main.c348
-rw-r--r--drivers/net/atlx/atl1.h4
-rw-r--r--drivers/net/b44.c144
-rw-r--r--drivers/net/benet/be_cmds.c11
-rw-r--r--drivers/net/benet/be_hw.h2
-rw-r--r--drivers/net/benet/be_main.c101
-rw-r--r--drivers/net/bfin_mac.c123
-rw-r--r--drivers/net/bfin_mac.h5
-rw-r--r--drivers/net/bnx2.c43
-rw-r--r--drivers/net/bnx2x_link.c8
-rw-r--r--drivers/net/bonding/bond_alb.c33
-rw-r--r--drivers/net/bonding/bond_main.c130
-rw-r--r--drivers/net/bonding/bond_sysfs.c279
-rw-r--r--drivers/net/bonding/bonding.h14
-rw-r--r--drivers/net/caif/caif_serial.c12
-rw-r--r--drivers/net/can/mscan/mscan.h2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/chelsio/common.h1
-rw-r--r--drivers/net/chelsio/subr.c49
-rw-r--r--drivers/net/cnic.c56
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c12
-rw-r--r--drivers/net/cxgb4/cxgb4.h6
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c281
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/cxgb4/l2t.c7
-rw-r--r--drivers/net/cxgb4/t4_hw.c94
-rw-r--r--drivers/net/cxgb4/t4_hw.h2
-rw-r--r--drivers/net/cxgb4/t4_regs.h3
-rw-r--r--drivers/net/cxgb4/t4fw_api.h34
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e1000e/82571.c2
-rw-r--r--drivers/net/e1000e/defines.h4
-rw-r--r--drivers/net/e1000e/e1000.h9
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c143
-rw-r--r--drivers/net/e1000e/hw.h15
-rw-r--r--drivers/net/e1000e/ich8lan.c452
-rw-r--r--drivers/net/e1000e/lib.c2
-rw-r--r--drivers/net/e1000e/netdev.c255
-rw-r--r--drivers/net/e1000e/param.c30
-rw-r--r--drivers/net/e1000e/phy.c5
-rw-r--r--drivers/net/ehea/ehea_qmr.h2
-rw-r--r--drivers/net/enic/vnic_vic.c5
-rw-r--r--drivers/net/enic/vnic_vic.h2
-rw-r--r--drivers/net/ethoc.c103
-rw-r--r--drivers/net/fec.c40
-rw-r--r--drivers/net/fec_mpc52xx_phy.c24
-rw-r--r--drivers/net/fsl_pq_mdio.h2
-rw-r--r--drivers/net/gianfar.c8
-rw-r--r--drivers/net/greth.c1
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/irda/irda-usb.h2
-rw-r--r--drivers/net/irda/ks959-sir.c2
-rw-r--r--drivers/net/irda/ksdazzle-sir.c2
-rw-r--r--drivers/net/irda/vlsi_ir.h6
-rw-r--r--drivers/net/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h26
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c41
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c35
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c369
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c29
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ksz884x.c2
-rw-r--r--drivers/net/loopback.c61
-rw-r--r--drivers/net/mac8390.c57
-rw-r--r--drivers/net/macvlan.c49
-rw-r--r--drivers/net/mlx4/en_netdev.c1
-rw-r--r--drivers/net/mlx4/eq.c14
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/ppp_generic.c18
-rw-r--r--drivers/net/pppoe.c3
-rw-r--r--drivers/net/ps3_gelic_wireless.h10
-rw-r--r--drivers/net/qlcnic/qlcnic.h150
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c576
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c20
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h84
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c62
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c144
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c523
-rw-r--r--drivers/net/qlge/qlge.h24
-rw-r--r--drivers/net/r6040.c302
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/sfc/efx.c75
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/falcon.c8
-rw-r--r--drivers/net/sfc/mcdi_phy.c21
-rw-r--r--drivers/net/sfc/net_driver.h44
-rw-r--r--drivers/net/sfc/nic.c55
-rw-r--r--drivers/net/sfc/nic.h4
-rw-r--r--drivers/net/sfc/rx.c393
-rw-r--r--drivers/net/sfc/selftest.c28
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sky2.h6
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c163
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tulip/de2104x.c4
-rw-r--r--drivers/net/tulip/eeprom.c10
-rw-r--r--drivers/net/tulip/tulip.h64
-rw-r--r--drivers/net/tulip/tulip_core.c122
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/typhoon.h26
-rw-r--r--drivers/net/ucc_geth.c14
-rw-r--r--drivers/net/ucc_geth.h46
-rw-r--r--drivers/net/usb/asix.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/via-velocity.h12
-rw-r--r--drivers/net/vxge/vxge-main.c93
-rw-r--r--drivers/net/wan/farsync.c14
-rw-r--r--drivers/net/wan/hd64570.h2
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/sdla.c13
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/fw.c8
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c2
-rw-r--r--drivers/net/wireless/adm8211.h6
-rw-r--r--drivers/net/wireless/airo.c32
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/at76c50x-usb.h40
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h19
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c373
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c79
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c152
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h310
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c13
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c82
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c64
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c116
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c743
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c372
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c118
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h (renamed from drivers/net/wireless/ath/ath9k/ar9003_initvals.h)254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1785
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c714
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c513
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h298
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c314
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h77
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c68
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c618
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h37
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c168
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c491
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c185
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h91
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c54
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c411
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c184
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c296
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h62
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c144
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/dma.c69
-rw-r--r--drivers/net/wireless/b43/dma.h8
-rw-r--r--drivers/net/wireless/b43/xmit.h20
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h6
-rw-r--r--drivers/net/wireless/b43legacy/dma.c49
-rw-r--r--drivers/net/wireless/b43legacy/dma.h8
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h10
-rw-r--r--drivers/net/wireless/hostap/hostap_80211.h18
-rw-r--r--drivers/net/wireless/hostap/hostap_common.h10
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h32
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c22
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h16
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h122
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h62
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c203
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c83
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c345
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c230
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c77
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c123
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c319
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h249
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c266
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c72
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c136
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h50
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h32
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h60
-rw-r--r--drivers/net/wireless/libertas/cmd.c37
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c30
-rw-r--r--drivers/net/wireless/libertas/decl.h2
-rw-r--r--drivers/net/wireless/libertas/dev.h6
-rw-r--r--drivers/net/wireless/libertas/ethtool.c24
-rw-r--r--drivers/net/wireless/libertas/host.h120
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c58
-rw-r--r--drivers/net/wireless/libertas/if_usb.c12
-rw-r--r--drivers/net/wireless/libertas/main.c79
-rw-r--r--drivers/net/wireless/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/libertas/rx.c8
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/types.h66
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c5
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mwl8k.c78
-rw-r--r--drivers/net/wireless/orinoco/fw.c2
-rw-r--r--drivers/net/wireless/orinoco/hermes.h18
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c10
-rw-r--r--drivers/net/wireless/orinoco/hw.c6
-rw-r--r--drivers/net/wireless/orinoco/main.c10
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c8
-rw-r--r--drivers/net/wireless/orinoco/wext.c6
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/net2280.h16
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/p54spi.c5
-rw-r--r--drivers/net/wireless/p54/p54spi.h2
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.h6
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c13
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h4
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h2
-rw-r--r--drivers/net/wireless/rndis_wlan.c70
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h63
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c210
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h13
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c136
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c106
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h37
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h42
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c51
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c43
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h19
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c59
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c51
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h8
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h2
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h102
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c40
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h6
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h33
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h102
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c41
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h66
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ini.h123
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c95
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c36
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h26
-rw-r--r--drivers/net/wireless/wl3501.h16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h15
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h14
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/s390/net/qeth_core.h22
-rw-r--r--drivers/s390/net/qeth_core_main.c369
-rw-r--r--drivers/s390/net/qeth_core_mpc.h5
-rw-r--r--drivers/s390/net/qeth_l2_main.c101
-rw-r--r--drivers/s390/net/qeth_l3_main.c252
-rw-r--r--drivers/s390/net/smsgiucv.c11
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c4
-rw-r--r--drivers/ssb/driver_chipcommon.c25
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c17
-rw-r--r--drivers/ssb/main.c76
-rw-r--r--drivers/ssb/pci.c15
-rw-r--r--drivers/staging/batman-adv/hard-interface.c2
-rw-r--r--drivers/usb/gadget/rndis.c2
-rw-r--r--drivers/vhost/net.c13
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--include/linux/caif/caif_socket.h34
-rw-r--r--include/linux/ethtool.h40
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_bonding.h1
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_fddi.h8
-rw-r--r--include/linux/if_frad.h2
-rw-r--r--include/linux/if_hippi.h8
-rw-r--r--include/linux/if_link.h3
-rw-r--r--include/linux/if_macvlan.h4
-rw-r--r--include/linux/if_packet.h1
-rw-r--r--include/linux/if_pppox.h8
-rw-r--r--include/linux/in.h1
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/isdnif.h2
-rw-r--r--include/linux/mISDNif.h2
-rw-r--r--include/linux/nbd.h2
-rw-r--r--include/linux/ncp.h10
-rw-r--r--include/linux/ncp_fs_sb.h14
-rw-r--r--include/linux/netdevice.h121
-rw-r--r--include/linux/netfilter/Kbuild1
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/linux/netfilter/nfnetlink_log.h1
-rw-r--r--include/linux/netfilter/xt_IDLETIMER.h45
-rw-r--r--include/linux/netpoll.h24
-rw-r--r--include/linux/nl80211.h2
-rw-r--r--include/linux/notifier.h1
-rw-r--r--include/linux/phonet.h4
-rw-r--r--include/linux/rds.h12
-rw-r--r--include/linux/sctp.h80
-rw-r--r--include/linux/skbuff.h11
-rw-r--r--include/linux/snmp.h1
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/ssb/ssb.h159
-rw-r--r--include/linux/u64_stats_sync.h107
-rw-r--r--include/linux/user_namespace.h14
-rw-r--r--include/linux/wlp.h22
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/net/caif/caif_dev.h8
-rw-r--r--include/net/caif/caif_layer.h6
-rw-r--r--include/net/caif/cfcnfg.h16
-rw-r--r--include/net/caif/cfsrvl.h15
-rw-r--r--include/net/cfg80211.h137
-rw-r--r--include/net/dn_dev.h8
-rw-r--r--include/net/dn_nsp.h16
-rw-r--r--include/net/dn_route.h4
-rw-r--r--include/net/genetlink.h15
-rw-r--r--include/net/inet_sock.h3
-rw-r--r--include/net/inetpeer.h30
-rw-r--r--include/net/ip.h7
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ipip.h2
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/ipx.h8
-rw-r--r--include/net/mac80211.h81
-rw-r--r--include/net/mip6.h2
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h17
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/netfilter/xt_rateest.h11
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/phonet/pn_dev.h1
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sch_generic.h27
-rw-r--r--include/net/scm.h30
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h13
-rw-r--r--include/net/tcp.h29
-rw-r--r--include/rxrpc/packet.h8
-rw-r--r--kernel/user_namespace.c44
-rw-r--r--net/8021q/vlanproc.c13
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/bluetooth/bnep/bnep.h8
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/bridge/br_device.c135
-rw-r--r--net/bridge/br_fdb.c4
-rw-r--r--net/bridge/br_forward.c34
-rw-r--r--net/bridge/br_if.c33
-rw-r--r--net/bridge/br_input.c21
-rw-r--r--net/bridge/br_netfilter.c29
-rw-r--r--net/bridge/br_netlink.c9
-rw-r--r--net/bridge/br_notify.c5
-rw-r--r--net/bridge/br_private.h64
-rw-r--r--net/bridge/br_stp_bpdu.c5
-rw-r--r--net/bridge/netfilter/ebt_redirect.c3
-rw-r--r--net/bridge/netfilter/ebt_ulog.c8
-rw-r--r--net/bridge/netfilter/ebtables.c11
-rw-r--r--net/caif/caif_config_util.c5
-rw-r--r--net/caif/caif_dev.c12
-rw-r--r--net/caif/caif_socket.c61
-rw-r--r--net/caif/cfcnfg.c54
-rw-r--r--net/caif/cfctrl.c8
-rw-r--r--net/caif/cfdbgl.c2
-rw-r--r--net/caif/cfdgml.c7
-rw-r--r--net/caif/cfpkt_skbuff.c5
-rw-r--r--net/caif/cfrfml.c318
-rw-r--r--net/caif/cfserl.c7
-rw-r--r--net/caif/cfsrvl.c26
-rw-r--r--net/caif/cfutill.c8
-rw-r--r--net/caif/cfveil.c7
-rw-r--r--net/caif/cfvidl.c2
-rw-r--r--net/caif/chnl_net.c67
-rw-r--r--net/can/raw.c11
-rw-r--r--net/compat.c53
-rw-r--r--net/core/dev.c269
-rw-r--r--net/core/gen_estimator.c1
-rw-r--r--net/core/net-sysfs.c12
-rw-r--r--net/core/netpoll.c195
-rw-r--r--net/core/pktgen.c42
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/core/scm.c24
-rw-r--r--net/core/sock.c39
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c30
-rw-r--r--net/dccp/proto.c11
-rw-r--r--net/decnet/dn_route.c158
-rw-r--r--net/econet/af_econet.c27
-rw-r--r--net/ethernet/eth.c5
-rw-r--r--net/ipv4/af_inet.c6
-rw-r--r--net/ipv4/arp.c21
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c1
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/icmp.c30
-rw-r--r--net/ipv4/igmp.c23
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inetpeer.c244
-rw-r--r--net/ipv4/ip_forward.c10
-rw-r--r--net/ipv4/ip_fragment.c12
-rw-r--r--net/ipv4/ip_gre.c14
-rw-r--r--net/ipv4/ip_input.c24
-rw-r--r--net/ipv4/ip_output.c60
-rw-r--r--net/ipv4/ip_sockglue.c45
-rw-r--r--net/ipv4/ipconfig.c7
-rw-r--r--net/ipv4/ipip.c8
-rw-r--r--net/ipv4/ipmr.c8
-rw-r--r--net/ipv4/netfilter.c12
-rw-r--r--net/ipv4/netfilter/arp_tables.c7
-rw-r--r--net/ipv4/netfilter/ip_queue.c57
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c48
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c2
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c22
-rw-r--r--net/ipv4/route.c504
-rw-r--r--net/ipv4/syncookies.c71
-rw-r--r--net/ipv4/tcp.c13
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c131
-rw-r--r--net/ipv4/tcp_output.c59
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/addrlabel.c6
-rw-r--r--net/ipv6/af_inet6.c9
-rw-r--r--net/ipv6/anycast.c96
-rw-r--r--net/ipv6/datagram.c18
-rw-r--r--net/ipv6/exthdrs.c34
-rw-r--r--net/ipv6/fib6_rules.c10
-rw-r--r--net/ipv6/inet6_connection_sock.c9
-rw-r--r--net/ipv6/ip6_fib.c30
-rw-r--r--net/ipv6/ip6_output.c38
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/mcast.c190
-rw-r--r--net/ipv6/ndisc.c8
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/netfilter/ip6_queue.c57
-rw-r--r--net/ipv6/netfilter/ip6_tables.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c9
-rw-r--r--net/ipv6/raw.c32
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c319
-rw-r--r--net/ipv6/sit.c16
-rw-r--r--net/ipv6/syncookies.c54
-rw-r--r--net/ipv6/tcp_ipv6.c42
-rw-r--r--net/ipv6/udp.c11
-rw-r--r--net/irda/irttp.c14
-rw-r--r--net/iucv/iucv.c14
-rw-r--r--net/l2tp/l2tp_ip.c6
-rw-r--r--net/mac80211/Kconfig7
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c123
-rw-r--r--net/mac80211/agg-tx.c554
-rw-r--r--net/mac80211/cfg.c77
-rw-r--r--net/mac80211/debugfs.c154
-rw-r--r--net/mac80211/debugfs_key.c2
-rw-r--r--net/mac80211/debugfs_sta.c65
-rw-r--r--net/mac80211/driver-ops.h95
-rw-r--r--net/mac80211/driver-trace.h188
-rw-r--r--net/mac80211/ht.c50
-rw-r--r--net/mac80211/ibss.c109
-rw-r--r--net/mac80211/ieee80211_i.h73
-rw-r--r--net/mac80211/iface.c188
-rw-r--r--net/mac80211/key.c290
-rw-r--r--net/mac80211/key.h30
-rw-r--r--net/mac80211/main.c116
-rw-r--r--net/mac80211/mesh.c73
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_pathtbl.c4
-rw-r--r--net/mac80211/mlme.c200
-rw-r--r--net/mac80211/pm.c18
-rw-r--r--net/mac80211/rate.h13
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c824
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h128
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c120
-rw-r--r--net/mac80211/rx.c177
-rw-r--r--net/mac80211/sta_info.c22
-rw-r--r--net/mac80211/sta_info.h97
-rw-r--r--net/mac80211/status.c2
-rw-r--r--net/mac80211/tx.c93
-rw-r--r--net/mac80211/util.c31
-rw-r--r--net/mac80211/work.c2
-rw-r--r--net/mac80211/wpa.c8
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c86
-rw-r--r--net/netfilter/nf_conntrack_core.c46
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c12
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c32
-rw-r--r--net/netfilter/nfnetlink_log.c73
-rw-r--r--net/netfilter/nfnetlink_queue.c39
-rw-r--r--net/netfilter/xt_CT.c4
-rw-r--r--net/netfilter/xt_IDLETIMER.c314
-rw-r--r--net/netfilter/xt_NOTRACK.c2
-rw-r--r--net/netfilter/xt_RATEEST.c12
-rw-r--r--net/netfilter/xt_TCPMSS.c8
-rw-r--r--net/netfilter/xt_TEE.c8
-rw-r--r--net/netfilter/xt_cluster.c2
-rw-r--r--net/netfilter/xt_conntrack.c11
-rw-r--r--net/netfilter/xt_sctp.c3
-rw-r--r--net/netfilter/xt_socket.c2
-rw-r--r--net/netfilter/xt_state.c14
-rw-r--r--net/netfilter/xt_statistic.c19
-rw-r--r--net/netlink/af_netlink.c11
-rw-r--r--net/packet/af_packet.c37
-rw-r--r--net/phonet/pn_dev.c15
-rw-r--r--net/rxrpc/ar-peer.c4
-rw-r--r--net/sched/act_api.c11
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c12
-rw-r--r--net/sched/sch_generic.c23
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/socket.c164
-rw-r--r--net/unix/af_unix.c97
-rw-r--r--net/wireless/chan.c5
-rw-r--r--net/wireless/core.h1
-rw-r--r--net/wireless/mlme.c8
-rw-r--r--net/wireless/nl80211.c60
-rw-r--r--net/wireless/util.c4
653 files changed, 25875 insertions, 15762 deletions
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 3ba0b945aaf8..f2430a7974e1 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -124,6 +124,8 @@ ip=<client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>
<hostname> Name of the client. May be supplied by autoconfiguration,
but its absence will not trigger autoconfiguration.
+ If specified and DHCP is used, the user provided hostname will
+ be carried in the DHCP request to hopefully update DNS record.
Default: Client IP address is used in ASCII notation.
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
index 80c728522c4c..e4d3267071e4 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/README.ipw2200
@@ -171,7 +171,7 @@ Where the supported parameter are:
led
Can be used to turn on experimental LED code.
- 0 = Off, 1 = On. Default is 0.
+ 0 = Off, 1 = On. Default is 1.
mode
Can be used to set the default mode of the adapter.
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 61f516b135b4..d0914781830e 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -49,6 +49,7 @@ Table of Contents
3.3 Configuring Bonding Manually with Ifenslave
3.3.1 Configuring Multiple Bonds Manually
3.4 Configuring Bonding Manually via Sysfs
+3.5 Overriding Configuration for Special Cases
4. Querying Bonding Configuration
4.1 Bonding Configuration
@@ -1318,8 +1319,87 @@ echo 2000 > /sys/class/net/bond1/bonding/arp_interval
echo +eth2 > /sys/class/net/bond1/bonding/slaves
echo +eth3 > /sys/class/net/bond1/bonding/slaves
-
-4. Querying Bonding Configuration
+3.5 Overriding Configuration for Special Cases
+----------------------------------------------
+When using the bonding driver, the physical port which transmits a frame is
+typically selected by the bonding driver, and is not relevant to the user or
+system administrator. The output port is simply selected using the policies of
+the selected bonding mode. On occasion however, it is helpful to direct certain
+classes of traffic to certain physical interfaces on output to implement
+slightly more complex policies. For example, to reach a web server over a
+bonded interface in which eth0 connects to a private network, while eth1
+connects via a public network, it may be desirous to bias the bond to send said
+traffic over eth0 first, using eth1 only as a fall back, while all other traffic
+can safely be sent over either interface. Such configurations may be achieved
+using the traffic control utilities inherent in linux.
+
+By default the bonding driver is multiqueue aware and 16 queues are created
+when the driver initializes (see Documentation/networking/multiqueue.txt
+for details). If more or less queues are desired the module parameter
+tx_queues can be used to change this value. There is no sysfs parameter
+available as the allocation is done at module init time.
+
+The output of the file /proc/net/bonding/bondX has changed so the output Queue
+ID is now printed for each slave:
+
+Bonding Mode: fault-tolerance (active-backup)
+Primary Slave: None
+Currently Active Slave: eth0
+MII Status: up
+MII Polling Interval (ms): 0
+Up Delay (ms): 0
+Down Delay (ms): 0
+
+Slave Interface: eth0
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cb
+Slave queue ID: 0
+
+Slave Interface: eth1
+MII Status: up
+Link Failure Count: 0
+Permanent HW addr: 00:1a:a0:12:8f:cc
+Slave queue ID: 2
+
+The queue_id for a slave can be set using the command:
+
+# echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id
+
+Any interface that needs a queue_id set should set it with multiple calls
+like the one above until proper priorities are set for all interfaces. On
+distributions that allow configuration via initscripts, multiple 'queue_id'
+arguments can be added to BONDING_OPTS to set all needed slave queues.
+
+These queue id's can be used in conjunction with the tc utility to configure
+a multiqueue qdisc and filters to bias certain traffic to transmit on certain
+slave devices. For instance, say we wanted, in the above configuration to
+force all traffic bound to 192.168.1.100 to use eth1 in the bond as its output
+device. The following commands would accomplish this:
+
+# tc qdisc add dev bond0 handle 1 root multiq
+
+# tc filter add dev bond0 protocol ip parent 1: prio 1 u32 match ip dst \
+ 192.168.1.100 action skbedit queue_mapping 2
+
+These commands tell the kernel to attach a multiqueue queue discipline to the
+bond0 interface and filter traffic enqueued to it, such that packets with a dst
+ip of 192.168.1.100 have their output queue mapping value overwritten to 2.
+This value is then passed into the driver, causing the normal output path
+selection policy to be overridden, selecting instead qid 2, which maps to eth1.
+
+Note that qid values begin at 1. Qid 0 is reserved to initiate to the driver
+that normal output policy selection should take place. One benefit to simply
+leaving the qid for a slave to 0 is the multiqueue awareness in the bonding
+driver that is now present. This awareness allows tc filters to be placed on
+slave devices as well as bond devices and the bonding driver will simply act as
+a pass-through for selecting output queues on the slave device rather than
+output port selection.
+
+This feature first appeared in bonding driver version 3.7.0 and support for
+output slave selection was limited to round-robin and active-backup modes.
+
+4 Querying Bonding Configuration
=================================
4.1 Bonding Configuration
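The queue_id mapping documented above can also be set programmatically rather than with echo. The following is a minimal, hypothetical C sketch (not part of this patch; the helper name and error handling are illustrative assumptions) that writes the same "eth1:2" entry into the bond's sysfs file:

    /* hypothetical helper mirroring: echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id */
    #include <stdio.h>

    /* write "<slave>:<qid>" into the bond's queue_id sysfs file; returns 0 on success */
    static int set_slave_queue_id(const char *bond, const char *slave, int qid)
    {
            char path[128];
            FILE *f;
            int ret = 0;

            snprintf(path, sizeof(path), "/sys/class/net/%s/bonding/queue_id", bond);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            if (fprintf(f, "%s:%d", slave, qid) < 0)
                    ret = -1;
            if (fclose(f) != 0)     /* sysfs rejects invalid mappings at write/close time */
                    ret = -1;
            return ret;
    }

    int main(void)
    {
            /* map eth1 to qid 2 on bond0, matching the documentation example above */
            if (set_slave_queue_id("bond0", "eth1", 2))
                    perror("set_slave_queue_id");
            return 0;
    }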
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d0536b5a4e01..f350c69b2bb4 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -903,7 +903,7 @@ arp_ignore - INTEGER
arp_notify - BOOLEAN
Define mode for notification of address and device changes.
0 - (default): do nothing
- 1 - Generate gratuitous arp replies when device is brought up
+ 1 - Generate gratuitous arp requests when device is brought up
or hardware address changes.
arp_accept - BOOLEAN
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 98f71a5cef00..2546aa4dc232 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -493,6 +493,32 @@ The user can also use poll() to check if a buffer is available:
pfd.events = POLLOUT;
retval = poll(&pfd, 1, timeout);
+-------------------------------------------------------------------------------
++ PACKET_TIMESTAMP
+-------------------------------------------------------------------------------
+
+The PACKET_TIMESTAMP setting determines the source of the timestamp in
+the packet meta information. If your NIC is capable of timestamping
+packets in hardware, you can request those hardware timestamps to used.
+Note: you may need to enable the generation of hardware timestamps with
+SIOCSHWTSTAMP.
+
+PACKET_TIMESTAMP accepts the same integer bit field as
+SO_TIMESTAMPING. However, only the SOF_TIMESTAMPING_SYS_HARDWARE
+and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by
+PACKET_TIMESTAMP. SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over
+SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set.
+
+ int req = 0;
+ req |= SOF_TIMESTAMPING_SYS_HARDWARE;
+ setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req))
+
+If PACKET_TIMESTAMP is not set, a software timestamp generated inside
+the networking stack is used (the behavior before this setting was added).
+
+See include/linux/net_tstamp.h and Documentation/networking/timestamping
+for more information on hardware timestamps.
+
--------------------------------------------------------------------------------
+ THANKS
--------------------------------------------------------------------------------
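To make the PACKET_TIMESTAMP fragment above concrete, here is a small self-contained sketch (an illustration only; it assumes a kernel that includes this patch and CAP_NET_RAW privileges) that opens a packet socket and requests hardware timestamps:

    /* sketch: request hardware timestamps on a packet socket */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));  /* needs CAP_NET_RAW */
            int req = SOF_TIMESTAMPING_SYS_HARDWARE;                 /* or SOF_TIMESTAMPING_RAW_HARDWARE */

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* the NIC may additionally need SIOCSHWTSTAMP to start generating hardware stamps */
            if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req)) < 0)
                    perror("setsockopt(PACKET_TIMESTAMP)");
            close(fd);
            return 0;
    }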
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 61bb645d50e0..75e4fd708ccb 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -151,6 +151,8 @@ Examples:
pgset stop aborts injection. Also, ^C aborts generator.
+ pgset "rate 300M" set rate to 300 Mb/s
+ pgset "ratep 1000000" set rate to 1Mpps
Example scripts
===============
@@ -241,6 +243,9 @@ src6
flows
flowlen
+rate
+ratep
+
References:
ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 48c4f0335e3f..81e1f7d5b4cb 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -101,10 +101,7 @@ extern struct dentry *of_debugfs_root;
* MicroBlaze doesn't handle unaligned accesses in hardware.
*
* Based on this we force the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
*/
#define NET_IP_ALIGN 2
-#define NET_SKB_PAD L1_CACHE_BYTES
#endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a6297c67c3d6..6c294acac848 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
*/
#define NET_IP_ALIGN 0
-#define NET_SKB_PAD L1_CACHE_BYTES
#define cmpxchg64(ptr, o, n) \
({ \
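The two hunks above only drop the NET_SKB_PAD override; the per-architecture NET_IP_ALIGN values stay. As a rough userspace illustration (an assumption-laden toy, not driver code) of what the 2-byte pad buys: with NET_IP_ALIGN bytes reserved ahead of the 14-byte Ethernet header, the IP header lands on a 4-byte boundary, which matters on machines such as MicroBlaze that cannot do unaligned loads.

    /* toy demo of the NET_IP_ALIGN arithmetic; assumes the buffer itself starts word-aligned */
    #include <stdio.h>

    #define ETH_HLEN     14   /* Ethernet header length */
    #define NET_IP_ALIGN  2   /* microblaze keeps 2; powerpc sets it to 0 (see the hunk above) */

    int main(void)
    {
            /* drivers typically do skb_reserve(skb, NET_IP_ALIGN) before receiving a frame */
            unsigned int eth_off = NET_IP_ALIGN;          /* offset of the Ethernet header */
            unsigned int ip_off  = eth_off + ETH_HLEN;    /* offset of the IP header */

            printf("IP header at offset %u -> %s\n", ip_off,
                   (ip_off % 4) ? "misaligned" : "4-byte aligned");
            return 0;
    }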
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index f1a0a00b3b07..be7461c9a87e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -177,7 +177,7 @@ config ATM_ZATM_DEBUG
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
- depends on PCI && !64BIT && VIRT_TO_BUS
+ depends on PCI
help
The NICStAR chipset family is used in a large number of ATM NICs for
25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 56c2e99e458f..ea9cbe596a28 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -67,6 +67,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/bitmap.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -778,61 +779,39 @@ he_init_cs_block_rcm(struct he_dev *he_dev)
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
+ struct he_buff *heb, *next;
+ dma_addr_t mapping;
int i;
- /* small buffer pool */
- he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
- CONFIG_RBPS_BUFSIZE, 8, 0);
- if (he_dev->rbps_pool == NULL) {
- hprintk("unable to create rbps pages\n");
+ he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
+ he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
+ he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
+ he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
+ G0_RBPS_BS + (group * 32));
+
+ /* bitmap table */
+ he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
+ * sizeof(unsigned long), GFP_KERNEL);
+ if (!he_dev->rbpl_table) {
+ hprintk("unable to allocate rbpl bitmap table\n");
return -ENOMEM;
}
+ bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
- he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
- CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
- if (he_dev->rbps_base == NULL) {
- hprintk("failed to alloc rbps_base\n");
- goto out_destroy_rbps_pool;
+ /* rbpl_virt 64-bit pointers */
+ he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
+ * sizeof(struct he_buff *), GFP_KERNEL);
+ if (!he_dev->rbpl_virt) {
+ hprintk("unable to allocate rbpl virt table\n");
+ goto out_free_rbpl_table;
}
- memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
- he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
- if (he_dev->rbps_virt == NULL) {
- hprintk("failed to alloc rbps_virt\n");
- goto out_free_rbps_base;
- }
-
- for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
- dma_addr_t dma_handle;
- void *cpuaddr;
-
- cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
- if (cpuaddr == NULL)
- goto out_free_rbps_virt;
-
- he_dev->rbps_virt[i].virt = cpuaddr;
- he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
- he_dev->rbps_base[i].phys = dma_handle;
-
- }
- he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
-
- he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
- he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
- G0_RBPS_T + (group * 32));
- he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
- G0_RBPS_BS + (group * 32));
- he_writel(he_dev,
- RBP_THRESH(CONFIG_RBPS_THRESH) |
- RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
- RBP_INT_ENB,
- G0_RBPS_QI + (group * 32));
/* large buffer pool */
he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
- CONFIG_RBPL_BUFSIZE, 8, 0);
+ CONFIG_RBPL_BUFSIZE, 64, 0);
if (he_dev->rbpl_pool == NULL) {
hprintk("unable to create rbpl pool\n");
- goto out_free_rbps_virt;
+ goto out_free_rbpl_virt;
}
he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
@@ -842,30 +821,29 @@ he_init_group(struct he_dev *he_dev, int group)
goto out_destroy_rbpl_pool;
}
memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
- he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
- if (he_dev->rbpl_virt == NULL) {
- hprintk("failed to alloc rbpl_virt\n");
- goto out_free_rbpl_base;
- }
+
+ INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
- dma_addr_t dma_handle;
- void *cpuaddr;
- cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
- if (cpuaddr == NULL)
- goto out_free_rbpl_virt;
+ heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
+ if (!heb)
+ goto out_free_rbpl;
+ heb->mapping = mapping;
+ list_add(&heb->entry, &he_dev->rbpl_outstanding);
- he_dev->rbpl_virt[i].virt = cpuaddr;
- he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
- he_dev->rbpl_base[i].phys = dma_handle;
+ set_bit(i, he_dev->rbpl_table);
+ he_dev->rbpl_virt[i] = heb;
+ he_dev->rbpl_hint = i + 1;
+ he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
+ he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
}
he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
G0_RBPL_T + (group * 32));
- he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
+ he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
G0_RBPL_BS + (group * 32));
he_writel(he_dev,
RBP_THRESH(CONFIG_RBPL_THRESH) |
@@ -879,7 +857,7 @@ he_init_group(struct he_dev *he_dev, int group)
CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
- goto out_free_rbpl_virt;
+ goto out_free_rbpl;
}
memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
@@ -920,33 +898,20 @@ out_free_rbpq_base:
pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
sizeof(struct he_rbrq), he_dev->rbrq_base,
he_dev->rbrq_phys);
- i = CONFIG_RBPL_SIZE;
-out_free_rbpl_virt:
- while (i--)
- pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
- he_dev->rbpl_base[i].phys);
- kfree(he_dev->rbpl_virt);
+out_free_rbpl:
+ list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+ pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
-out_free_rbpl_base:
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
sizeof(struct he_rbp), he_dev->rbpl_base,
he_dev->rbpl_phys);
out_destroy_rbpl_pool:
pci_pool_destroy(he_dev->rbpl_pool);
+out_free_rbpl_virt:
+ kfree(he_dev->rbpl_virt);
+out_free_rbpl_table:
+ kfree(he_dev->rbpl_table);
- i = CONFIG_RBPS_SIZE;
-out_free_rbps_virt:
- while (i--)
- pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
- he_dev->rbps_base[i].phys);
- kfree(he_dev->rbps_virt);
-
-out_free_rbps_base:
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE *
- sizeof(struct he_rbp), he_dev->rbps_base,
- he_dev->rbps_phys);
-out_destroy_rbps_pool:
- pci_pool_destroy(he_dev->rbps_pool);
return -ENOMEM;
}
@@ -1576,9 +1541,10 @@ he_start(struct atm_dev *dev)
static void
he_stop(struct he_dev *he_dev)
{
- u16 command;
- u32 gen_cntl_0, reg;
+ struct he_buff *heb, *next;
struct pci_dev *pci_dev;
+ u32 gen_cntl_0, reg;
+ u16 command;
pci_dev = he_dev->pci_dev;
@@ -1619,37 +1585,19 @@ he_stop(struct he_dev *he_dev)
he_dev->hsp, he_dev->hsp_phys);
if (he_dev->rbpl_base) {
- int i;
-
- for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
- void *cpuaddr = he_dev->rbpl_virt[i].virt;
- dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
+ list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
+ pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
- pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
- }
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
}
+ kfree(he_dev->rbpl_virt);
+ kfree(he_dev->rbpl_table);
+
if (he_dev->rbpl_pool)
pci_pool_destroy(he_dev->rbpl_pool);
- if (he_dev->rbps_base) {
- int i;
-
- for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
- void *cpuaddr = he_dev->rbps_virt[i].virt;
- dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
-
- pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
- }
- pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
- * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
- }
-
- if (he_dev->rbps_pool)
- pci_pool_destroy(he_dev->rbps_pool);
-
if (he_dev->rbrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
he_dev->rbrq_base, he_dev->rbrq_phys);
@@ -1679,13 +1627,13 @@ static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
struct he_tpd *tpd;
- dma_addr_t dma_handle;
+ dma_addr_t mapping;
- tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
+ tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
if (tpd == NULL)
return NULL;
- tpd->status = TPD_ADDR(dma_handle);
+ tpd->status = TPD_ADDR(mapping);
tpd->reserved = 0;
tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
@@ -1714,13 +1662,12 @@ he_service_rbrq(struct he_dev *he_dev, int group)
struct he_rbrq *rbrq_tail = (struct he_rbrq *)
((unsigned long)he_dev->rbrq_base |
he_dev->hsp->group[group].rbrq_tail);
- struct he_rbp *rbp = NULL;
unsigned cid, lastcid = -1;
- unsigned buf_len = 0;
struct sk_buff *skb;
struct atm_vcc *vcc = NULL;
struct he_vcc *he_vcc;
- struct he_iovec *iov;
+ struct he_buff *heb, *next;
+ int i;
int pdus_assembled = 0;
int updated = 0;
@@ -1740,44 +1687,35 @@ he_service_rbrq(struct he_dev *he_dev, int group)
RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
- if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
- rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
- else
- rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
-
- buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
- cid = RBRQ_CID(he_dev->rbrq_head);
+ i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
+ heb = he_dev->rbpl_virt[i];
+ cid = RBRQ_CID(he_dev->rbrq_head);
if (cid != lastcid)
vcc = __find_vcc(he_dev, cid);
lastcid = cid;
- if (vcc == NULL) {
- hprintk("vcc == NULL (cid 0x%x)\n", cid);
- if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
- rbp->status &= ~RBP_LOANED;
+ if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
+ hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
+ if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
+ clear_bit(i, he_dev->rbpl_table);
+ list_del(&heb->entry);
+ pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ }
goto next_rbrq_entry;
}
- he_vcc = HE_VCC(vcc);
- if (he_vcc == NULL) {
- hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
- if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
- rbp->status &= ~RBP_LOANED;
- goto next_rbrq_entry;
- }
-
if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
atomic_inc(&vcc->stats->rx_drop);
goto return_host_buffers;
}
- he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
- he_vcc->iov_tail->iov_len = buf_len;
- he_vcc->pdu_len += buf_len;
- ++he_vcc->iov_tail;
+ heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
+ clear_bit(i, he_dev->rbpl_table);
+ list_move_tail(&heb->entry, &he_vcc->buffers);
+ he_vcc->pdu_len += heb->len;
if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
lastcid = -1;
@@ -1786,12 +1724,6 @@ he_service_rbrq(struct he_dev *he_dev, int group)
goto return_host_buffers;
}
-#ifdef notdef
- if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
- hprintk("iovec full! cid 0x%x\n", cid);
- goto return_host_buffers;
- }
-#endif
if (!RBRQ_END_PDU(he_dev->rbrq_head))
goto next_rbrq_entry;
@@ -1819,15 +1751,8 @@ he_service_rbrq(struct he_dev *he_dev, int group)
__net_timestamp(skb);
- for (iov = he_vcc->iov_head;
- iov < he_vcc->iov_tail; ++iov) {
- if (iov->iov_base & RBP_SMALLBUF)
- memcpy(skb_put(skb, iov->iov_len),
- he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
- else
- memcpy(skb_put(skb, iov->iov_len),
- he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
- }
+ list_for_each_entry(heb, &he_vcc->buffers, entry)
+ memcpy(skb_put(skb, heb->len), &heb->data, heb->len);
switch (vcc->qos.aal) {
case ATM_AAL0:
@@ -1867,17 +1792,9 @@ he_service_rbrq(struct he_dev *he_dev, int group)
return_host_buffers:
++pdus_assembled;
- for (iov = he_vcc->iov_head;
- iov < he_vcc->iov_tail; ++iov) {
- if (iov->iov_base & RBP_SMALLBUF)
- rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
- else
- rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
-
- rbp->status &= ~RBP_LOANED;
- }
-
- he_vcc->iov_tail = he_vcc->iov_head;
+ list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
+ pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
+ INIT_LIST_HEAD(&he_vcc->buffers);
he_vcc->pdu_len = 0;
next_rbrq_entry:
@@ -1978,59 +1895,51 @@ next_tbrq_entry:
}
}
-
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
- struct he_rbp *newtail;
+ struct he_rbp *new_tail;
struct he_rbp *rbpl_head;
+ struct he_buff *heb;
+ dma_addr_t mapping;
+ int i;
int moved = 0;
rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
for (;;) {
- newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
+ new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
RBPL_MASK(he_dev->rbpl_tail+1));
/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
- if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
+ if (new_tail == rbpl_head)
break;
- newtail->status |= RBP_LOANED;
- he_dev->rbpl_tail = newtail;
- ++moved;
- }
-
- if (moved)
- he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
-}
-
-static void
-he_service_rbps(struct he_dev *he_dev, int group)
-{
- struct he_rbp *newtail;
- struct he_rbp *rbps_head;
- int moved = 0;
-
- rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
- RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
-
- for (;;) {
- newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
- RBPS_MASK(he_dev->rbps_tail+1));
+ i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
+ if (i > (RBPL_TABLE_SIZE - 1)) {
+ i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
+ if (i > (RBPL_TABLE_SIZE - 1))
+ break;
+ }
+ he_dev->rbpl_hint = i + 1;
- /* table 3.42 -- rbps_tail should never be set to rbps_head */
- if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
+ heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
+ if (!heb)
break;
-
- newtail->status |= RBP_LOANED;
- he_dev->rbps_tail = newtail;
+ heb->mapping = mapping;
+ list_add(&heb->entry, &he_dev->rbpl_outstanding);
+ he_dev->rbpl_virt[i] = heb;
+ set_bit(i, he_dev->rbpl_table);
+ new_tail->idx = i << RBP_IDX_OFFSET;
+ new_tail->phys = mapping + offsetof(struct he_buff, data);
+
+ he_dev->rbpl_tail = new_tail;
++moved;
}
if (moved)
- he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
+ he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
static void
@@ -2055,10 +1964,8 @@ he_tasklet(unsigned long data)
HPRINTK("rbrq%d threshold\n", group);
/* fall through */
case ITYPE_RBRQ_TIMER:
- if (he_service_rbrq(he_dev, group)) {
+ if (he_service_rbrq(he_dev, group))
he_service_rbpl(he_dev, group);
- he_service_rbps(he_dev, group);
- }
break;
case ITYPE_TBRQ_THRESH:
HPRINTK("tbrq%d threshold\n", group);
@@ -2070,7 +1977,7 @@ he_tasklet(unsigned long data)
he_service_rbpl(he_dev, group);
break;
case ITYPE_RBPS_THRESH:
- he_service_rbps(he_dev, group);
+ /* shouldn't happen unless small buffers enabled */
break;
case ITYPE_PHY:
HPRINTK("phy interrupt\n");
@@ -2098,7 +2005,6 @@ he_tasklet(unsigned long data)
he_service_rbrq(he_dev, 0);
he_service_rbpl(he_dev, 0);
- he_service_rbps(he_dev, 0);
he_service_tbrq(he_dev, 0);
break;
default:
@@ -2252,7 +2158,7 @@ he_open(struct atm_vcc *vcc)
return -ENOMEM;
}
- he_vcc->iov_tail = he_vcc->iov_head;
+ INIT_LIST_HEAD(&he_vcc->buffers);
he_vcc->pdu_len = 0;
he_vcc->rc_index = -1;
@@ -2406,8 +2312,8 @@ he_open(struct atm_vcc *vcc)
goto open_failed;
}
- rsr1 = RSR1_GROUP(0);
- rsr4 = RSR4_GROUP(0);
+ rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
+ rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
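The replenish loop in he_service_rbpl() above now hands out buffer indices from a bitmap, searching forward from a hint and wrapping around once before giving up. A rough userspace mimic of that allocation policy (plain arrays instead of the kernel's find_next_zero_bit()/set_bit(); all names here are made up) looks like this:

    /* toy mimic of the hint-based slot search used for rbpl_table/rbpl_hint */
    #include <stdio.h>

    #define TABLE_SIZE 128                    /* stands in for RBPL_TABLE_SIZE */

    static unsigned char table[TABLE_SIZE];   /* 1 = slot owned by the hardware, like the rbpl bitmap */
    static unsigned int hint;                 /* like he_dev->rbpl_hint */

    /* find a free slot starting at the hint, wrap around once, return -1 when full */
    static int alloc_slot(void)
    {
            unsigned int i;

            for (i = hint; i < TABLE_SIZE; i++)
                    if (!table[i])
                            goto found;
            for (i = 0; i < hint; i++)
                    if (!table[i])
                            goto found;
            return -1;
    found:
            table[i] = 1;
            hint = (i + 1) % TABLE_SIZE;      /* next search starts just past this slot */
            return (int)i;
    }

    int main(void)
    {
            int i, slot;

            for (i = 0; i < TABLE_SIZE; i++)  /* give every slot to the "card" */
                    alloc_slot();
            table[37] = 0;                    /* rx completion frees a slot (clear_bit() in the driver) */
            slot = alloc_slot();              /* the search (wrapping if needed) picks it up again */
            printf("reallocated slot %d\n", slot);
            return 0;
    }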
diff --git a/drivers/atm/he.h b/drivers/atm/he.h
index c2983e0d4ec1..110a27d2ecfc 100644
--- a/drivers/atm/he.h
+++ b/drivers/atm/he.h
@@ -67,11 +67,6 @@
#define CONFIG_RBPL_BUFSIZE 4096
#define RBPL_MASK(x) (((unsigned long)(x))&((CONFIG_RBPL_SIZE<<3)-1))
-#define CONFIG_RBPS_SIZE 1024
-#define CONFIG_RBPS_THRESH 64
-#define CONFIG_RBPS_BUFSIZE 128
-#define RBPS_MASK(x) (((unsigned long)(x))&((CONFIG_RBPS_SIZE<<3)-1))
-
/* 5.1.3 initialize connection memory */
#define CONFIG_RSRA 0x00000
@@ -203,36 +198,37 @@ struct he_hsp {
} group[HE_NUM_GROUPS];
};
-/* figure 2.9 receive buffer pools */
+/*
+ * figure 2.9 receive buffer pools
+ *
+ * since a virtual address might be more than 32 bits, we store an index
+ * in the virt member of he_rbp. NOTE: the lower six bits in the rbrq
+ * addr member are used for buffer status further limiting us to 26 bits.
+ */
struct he_rbp {
volatile u32 phys;
- volatile u32 status;
+ volatile u32 idx; /* virt */
};
-/* NOTE: it is suggested that virt be the virtual address of the host
- buffer. on a 64-bit machine, this would not work. Instead, we
- store the real virtual address in another list, and store an index
- (and buffer status) in the virt member.
-*/
+#define RBP_IDX_OFFSET 6
-#define RBP_INDEX_OFF 6
-#define RBP_INDEX(x) (((long)(x) >> RBP_INDEX_OFF) & 0xffff)
-#define RBP_LOANED 0x80000000
-#define RBP_SMALLBUF 0x40000000
+/*
+ * the he dma engine will try to hold an extra 16 buffers in its local
+ * caches. and add a couple buffers for safety.
+ */
-struct he_virt {
- void *virt;
-};
+#define RBPL_TABLE_SIZE (CONFIG_RBPL_SIZE + 16 + 2)
-#define RBPL_ALIGNMENT CONFIG_RBPL_SIZE
-#define RBPS_ALIGNMENT CONFIG_RBPS_SIZE
+struct he_buff {
+ struct list_head entry;
+ dma_addr_t mapping;
+ unsigned long len;
+ u8 data[];
+};
#ifdef notyet
struct he_group {
- u32 rpbs_size, rpbs_qsize;
- struct he_rbp rbps_ba;
-
u32 rpbl_size, rpbl_qsize;
struct he_rpb_entry *rbpl_ba;
};
@@ -297,18 +293,15 @@ struct he_dev {
struct he_rbrq *rbrq_base, *rbrq_head;
int rbrq_peak;
+ struct he_buff **rbpl_virt;
+ unsigned long *rbpl_table;
+ unsigned long rbpl_hint;
struct pci_pool *rbpl_pool;
dma_addr_t rbpl_phys;
struct he_rbp *rbpl_base, *rbpl_tail;
- struct he_virt *rbpl_virt;
+ struct list_head rbpl_outstanding;
int rbpl_peak;
- struct pci_pool *rbps_pool;
- dma_addr_t rbps_phys;
- struct he_rbp *rbps_base, *rbps_tail;
- struct he_virt *rbps_virt;
- int rbps_peak;
-
dma_addr_t tbrq_phys;
struct he_tbrq *tbrq_base, *tbrq_head;
int tbrq_peak;
@@ -321,20 +314,12 @@ struct he_dev {
struct he_dev *next;
};
-struct he_iovec
-{
- u32 iov_base;
- u32 iov_len;
-};
-
#define HE_MAXIOV 20
struct he_vcc
{
- struct he_iovec iov_head[HE_MAXIOV];
- struct he_iovec *iov_tail;
+ struct list_head buffers;
int pdu_len;
-
int rc_index;
wait_queue_head_t rx_waitq;
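The new struct he_buff above places the bookkeeping fields in front of a flexible array member, which is why the receive path programs the card with mapping + offsetof(struct he_buff, data) and why the usable buffer size becomes CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff). A minimal userspace sketch of that layout trick (hypothetical names, plain malloc() instead of pci_pool_alloc(), list_head omitted):

    /* toy illustration of the header-plus-flexible-array buffer layout */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct buf {
            unsigned long mapping;    /* stands in for the dma_addr_t of the whole allocation */
            unsigned long len;
            unsigned char data[];     /* payload follows the bookkeeping header */
    };

    #define BUFSIZE 4096              /* plays the role of CONFIG_RBPL_BUFSIZE */

    int main(void)
    {
            struct buf *b = malloc(BUFSIZE);

            if (!b)
                    return 1;
            b->mapping = 0x10000000UL;        /* pretend bus address of the whole allocation */
            b->len = 0;
            /* the card must DMA into data[], not over the header, hence the offsetof() */
            printf("header %zu bytes, payload address 0x%lx, usable payload %zu bytes\n",
                   offsetof(struct buf, data),
                   b->mapping + offsetof(struct buf, data),
                   (size_t)(BUFSIZE - sizeof(struct buf)));
            free(b);
            return 0;
    }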
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b7473a6110a7..59876c66a92a 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1,5 +1,4 @@
-/******************************************************************************
- *
+/*
* nicstar.c
*
* Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
@@ -16,12 +15,10 @@
*
*
* (C) INESC 1999
- *
- *
- ******************************************************************************/
-
+ */
-/**** IMPORTANT INFORMATION ***************************************************
+/*
+ * IMPORTANT INFORMATION
*
* There are currently three types of spinlocks:
*
@@ -31,9 +28,9 @@
*
* These must NEVER be grabbed in reverse order.
*
- ******************************************************************************/
+ */
-/* Header files ***************************************************************/
+/* Header files */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -41,6 +38,7 @@
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
@@ -50,6 +48,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/idr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
@@ -61,16 +60,11 @@
#include "idt77105.h"
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
-#if BITS_PER_LONG != 32
-# error FIXME: this driver requires a 32-bit platform
-#endif
-
-/* Additional code ************************************************************/
+/* Additional code */
#include "nicstarmac.c"
-
-/* Configurable parameters ****************************************************/
+/* Configurable parameters */
#undef PHY_LOOPBACK
#undef TX_DEBUG
@@ -78,11 +72,10 @@
#undef GENERAL_DEBUG
#undef EXTRA_DEBUG
-#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
- you're going to use only raw ATM */
+#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
+ you're going to use only raw ATM */
-
-/* Do not touch these *********************************************************/
+/* Do not touch these */
#ifdef TX_DEBUG
#define TXPRINTK(args...) printk(args)
@@ -108,2908 +101,2786 @@
#define XPRINTK(args...)
#endif /* EXTRA_DEBUG */
-
-/* Macros *********************************************************************/
+/* Macros */
#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
#define NS_DELAY mdelay(1)
-#define ALIGN_BUS_ADDR(addr, alignment) \
- ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
-#define ALIGN_ADDRESS(addr, alignment) \
- bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))
-
-#undef CEIL
+#define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b)))
#ifndef ATM_SKB
#define ATM_SKB(s) (&(s)->atm)
#endif
+#define scq_virt_to_bus(scq, p) \
+ (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
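These two macros replace the old virt_to_bus() arithmetic: an SCQ entry's bus address is now the DMA handle returned by pci_alloc_consistent() plus the entry's byte offset within the allocation. Written out as an inline, assuming scq->org is the CPU pointer and scq->dma its matching DMA handle (which is how the fields are used later in this patch):

static inline u32 scq_entry_to_bus(scq_info *scq, ns_scqe *scqe)
{
	/* byte offset of the entry inside the coherent allocation ... */
	u32 off = PTR_DIFF(scqe, scq->org);

	/* ... added to the DMA handle of that allocation */
	return scq->dma + off;
}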
-/* Function declarations ******************************************************/
+/* Function declarations */
-static u32 ns_read_sram(ns_dev *card, u32 sram_address);
-static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
+static u32 ns_read_sram(ns_dev * card, u32 sram_address);
+static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
+ int count);
static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
-static void __devinit ns_init_card_error(ns_dev *card, int error);
-static scq_info *get_scq(int size, u32 scd);
-static void free_scq(scq_info *scq, struct atm_vcc *vcc);
+static void __devinit ns_init_card_error(ns_dev * card, int error);
+static scq_info *get_scq(ns_dev *card, int size, u32 scd);
+static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
static void push_rxbufs(ns_dev *, struct sk_buff *);
static irqreturn_t ns_irq_handler(int irq, void *dev_id);
static int ns_open(struct atm_vcc *vcc);
static void ns_close(struct atm_vcc *vcc);
-static void fill_tst(ns_dev *card, int n, vc_map *vc);
+static void fill_tst(ns_dev * card, int n, vc_map * vc);
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
-static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
- struct sk_buff *skb);
-static void process_tsq(ns_dev *card);
-static void drain_scq(ns_dev *card, scq_info *scq, int pos);
-static void process_rsq(ns_dev *card);
-static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
+static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
+ struct sk_buff *skb);
+static void process_tsq(ns_dev * card);
+static void drain_scq(ns_dev * card, scq_info * scq, int pos);
+static void process_rsq(ns_dev * card);
+static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
#ifdef NS_USE_DESTRUCTORS
static void ns_sb_destructor(struct sk_buff *sb);
static void ns_lb_destructor(struct sk_buff *lb);
static void ns_hb_destructor(struct sk_buff *hb);
#endif /* NS_USE_DESTRUCTORS */
-static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
-static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
-static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
-static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
-static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
-static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
-static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
-static void which_list(ns_dev *card, struct sk_buff *skb);
+static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
+static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
+static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
+static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
+static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
+static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
+static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
+#ifdef EXTRA_DEBUG
+static void which_list(ns_dev * card, struct sk_buff *skb);
+#endif
static void ns_poll(unsigned long arg);
static int ns_parse_mac(char *mac, unsigned char *esi);
static short ns_h2i(char c);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
- unsigned long addr);
+ unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
-
-
-/* Global variables ***********************************************************/
+/* Global variables */
static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;
-static struct atmdev_ops atm_ops =
-{
- .open = ns_open,
- .close = ns_close,
- .ioctl = ns_ioctl,
- .send = ns_send,
- .phy_put = ns_phy_put,
- .phy_get = ns_phy_get,
- .proc_read = ns_proc_read,
- .owner = THIS_MODULE,
+static struct atmdev_ops atm_ops = {
+ .open = ns_open,
+ .close = ns_close,
+ .ioctl = ns_ioctl,
+ .send = ns_send,
+ .phy_put = ns_phy_put,
+ .phy_get = ns_phy_get,
+ .proc_read = ns_proc_read,
+ .owner = THIS_MODULE,
};
+
static struct timer_list ns_timer;
static char *mac[NS_MAX_CARDS];
module_param_array(mac, charp, NULL, 0);
MODULE_LICENSE("GPL");
-
-/* Functions*******************************************************************/
+/* Functions */
static int __devinit nicstar_init_one(struct pci_dev *pcidev,
const struct pci_device_id *ent)
{
- static int index = -1;
- unsigned int error;
+ static int index = -1;
+ unsigned int error;
- index++;
- cards[index] = NULL;
+ index++;
+ cards[index] = NULL;
- error = ns_init_card(index, pcidev);
- if (error) {
- cards[index--] = NULL; /* don't increment index */
- goto err_out;
- }
+ error = ns_init_card(index, pcidev);
+ if (error) {
+ cards[index--] = NULL; /* don't increment index */
+ goto err_out;
+ }
- return 0;
+ return 0;
err_out:
- return -ENODEV;
+ return -ENODEV;
}
-
-
static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
{
- int i, j;
- ns_dev *card = pci_get_drvdata(pcidev);
- struct sk_buff *hb;
- struct sk_buff *iovb;
- struct sk_buff *lb;
- struct sk_buff *sb;
-
- i = card->index;
-
- if (cards[i] == NULL)
- return;
-
- if (card->atmdev->phy && card->atmdev->phy->stop)
- card->atmdev->phy->stop(card->atmdev);
-
- /* Stop everything */
- writel(0x00000000, card->membase + CFG);
-
- /* De-register device */
- atm_dev_deregister(card->atmdev);
-
- /* Disable PCI device */
- pci_disable_device(pcidev);
-
- /* Free up resources */
- j = 0;
- PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
- while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
- {
- dev_kfree_skb_any(hb);
- j++;
- }
- PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
- j = 0;
- PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
- while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
- {
- dev_kfree_skb_any(iovb);
- j++;
- }
- PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
- while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
- dev_kfree_skb_any(lb);
- while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
- dev_kfree_skb_any(sb);
- free_scq(card->scq0, NULL);
- for (j = 0; j < NS_FRSCD_NUM; j++)
- {
- if (card->scd2vc[j] != NULL)
- free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
- }
- kfree(card->rsq.org);
- kfree(card->tsq.org);
- free_irq(card->pcidev->irq, card);
- iounmap(card->membase);
- kfree(card);
+ int i, j;
+ ns_dev *card = pci_get_drvdata(pcidev);
+ struct sk_buff *hb;
+ struct sk_buff *iovb;
+ struct sk_buff *lb;
+ struct sk_buff *sb;
+
+ i = card->index;
+
+ if (cards[i] == NULL)
+ return;
+
+ if (card->atmdev->phy && card->atmdev->phy->stop)
+ card->atmdev->phy->stop(card->atmdev);
+
+ /* Stop everything */
+ writel(0x00000000, card->membase + CFG);
+
+ /* De-register device */
+ atm_dev_deregister(card->atmdev);
+
+ /* Disable PCI device */
+ pci_disable_device(pcidev);
+
+ /* Free up resources */
+ j = 0;
+ PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
+ while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
+ dev_kfree_skb_any(hb);
+ j++;
+ }
+ PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
+ j = 0;
+ PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
+ card->iovpool.count);
+ while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
+ dev_kfree_skb_any(iovb);
+ j++;
+ }
+ PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
+ while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
+ dev_kfree_skb_any(lb);
+ while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
+ dev_kfree_skb_any(sb);
+ free_scq(card, card->scq0, NULL);
+ for (j = 0; j < NS_FRSCD_NUM; j++) {
+ if (card->scd2vc[j] != NULL)
+ free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
+ }
+ idr_remove_all(&card->idr);
+ idr_destroy(&card->idr);
+ pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ card->rsq.org, card->rsq.dma);
+ pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ card->tsq.org, card->tsq.dma);
+ free_irq(card->pcidev->irq, card);
+ iounmap(card->membase);
+ kfree(card);
}
-
-
-static struct pci_device_id nicstar_pci_tbl[] __devinitdata =
-{
+static struct pci_device_id nicstar_pci_tbl[] __devinitdata = {
{PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,} /* terminate list */
};
-MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
-
+MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
static struct pci_driver nicstar_driver = {
- .name = "nicstar",
- .id_table = nicstar_pci_tbl,
- .probe = nicstar_init_one,
- .remove = __devexit_p(nicstar_remove_one),
+ .name = "nicstar",
+ .id_table = nicstar_pci_tbl,
+ .probe = nicstar_init_one,
+ .remove = __devexit_p(nicstar_remove_one),
};
-
-
static int __init nicstar_init(void)
{
- unsigned error = 0; /* Initialized to remove compile warning */
+ unsigned error = 0; /* Initialized to remove compile warning */
+
+ XPRINTK("nicstar: nicstar_init() called.\n");
- XPRINTK("nicstar: nicstar_init() called.\n");
+ error = pci_register_driver(&nicstar_driver);
- error = pci_register_driver(&nicstar_driver);
-
- TXPRINTK("nicstar: TX debug enabled.\n");
- RXPRINTK("nicstar: RX debug enabled.\n");
- PRINTK("nicstar: General debug enabled.\n");
+ TXPRINTK("nicstar: TX debug enabled.\n");
+ RXPRINTK("nicstar: RX debug enabled.\n");
+ PRINTK("nicstar: General debug enabled.\n");
#ifdef PHY_LOOPBACK
- printk("nicstar: using PHY loopback.\n");
+ printk("nicstar: using PHY loopback.\n");
#endif /* PHY_LOOPBACK */
- XPRINTK("nicstar: nicstar_init() returned.\n");
-
- if (!error) {
- init_timer(&ns_timer);
- ns_timer.expires = jiffies + NS_POLL_PERIOD;
- ns_timer.data = 0UL;
- ns_timer.function = ns_poll;
- add_timer(&ns_timer);
- }
-
- return error;
-}
+ XPRINTK("nicstar: nicstar_init() returned.\n");
+ if (!error) {
+ init_timer(&ns_timer);
+ ns_timer.expires = jiffies + NS_POLL_PERIOD;
+ ns_timer.data = 0UL;
+ ns_timer.function = ns_poll;
+ add_timer(&ns_timer);
+ }
+ return error;
+}
static void __exit nicstar_cleanup(void)
{
- XPRINTK("nicstar: nicstar_cleanup() called.\n");
+ XPRINTK("nicstar: nicstar_cleanup() called.\n");
- del_timer(&ns_timer);
+ del_timer(&ns_timer);
- pci_unregister_driver(&nicstar_driver);
+ pci_unregister_driver(&nicstar_driver);
- XPRINTK("nicstar: nicstar_cleanup() returned.\n");
+ XPRINTK("nicstar: nicstar_cleanup() returned.\n");
}
-
-
-static u32 ns_read_sram(ns_dev *card, u32 sram_address)
+static u32 ns_read_sram(ns_dev * card, u32 sram_address)
{
- unsigned long flags;
- u32 data;
- sram_address <<= 2;
- sram_address &= 0x0007FFFC; /* address must be dword aligned */
- sram_address |= 0x50000000; /* SRAM read command */
- spin_lock_irqsave(&card->res_lock, flags);
- while (CMD_BUSY(card));
- writel(sram_address, card->membase + CMD);
- while (CMD_BUSY(card));
- data = readl(card->membase + DR0);
- spin_unlock_irqrestore(&card->res_lock, flags);
- return data;
+ unsigned long flags;
+ u32 data;
+ sram_address <<= 2;
+ sram_address &= 0x0007FFFC; /* address must be dword aligned */
+ sram_address |= 0x50000000; /* SRAM read command */
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ writel(sram_address, card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ data = readl(card->membase + DR0);
+ spin_unlock_irqrestore(&card->res_lock, flags);
+ return data;
}
-
-
-static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
+static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
+ int count)
{
- unsigned long flags;
- int i, c;
- count--; /* count range now is 0..3 instead of 1..4 */
- c = count;
- c <<= 2; /* to use increments of 4 */
- spin_lock_irqsave(&card->res_lock, flags);
- while (CMD_BUSY(card));
- for (i = 0; i <= c; i += 4)
- writel(*(value++), card->membase + i);
- /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
- so card->membase + DR0 == card->membase */
- sram_address <<= 2;
- sram_address &= 0x0007FFFC;
- sram_address |= (0x40000000 | count);
- writel(sram_address, card->membase + CMD);
- spin_unlock_irqrestore(&card->res_lock, flags);
+ unsigned long flags;
+ int i, c;
+ count--; /* count range now is 0..3 instead of 1..4 */
+ c = count;
+ c <<= 2; /* to use increments of 4 */
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ for (i = 0; i <= c; i += 4)
+ writel(*(value++), card->membase + i);
+ /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
+ so card->membase + DR0 == card->membase */
+ sram_address <<= 2;
+ sram_address &= 0x0007FFFC;
+ sram_address |= (0x40000000 | count);
+ writel(sram_address, card->membase + CMD);
+ spin_unlock_irqrestore(&card->res_lock, flags);
}
-
static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
{
- int j;
- struct ns_dev *card = NULL;
- unsigned char pci_latency;
- unsigned error;
- u32 data;
- u32 u32d[4];
- u32 ns_cfg_rctsize;
- int bcount;
- unsigned long membase;
-
- error = 0;
-
- if (pci_enable_device(pcidev))
- {
- printk("nicstar%d: can't enable PCI device\n", i);
- error = 2;
- ns_init_card_error(card, error);
- return error;
- }
-
- if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
- {
- printk("nicstar%d: can't allocate memory for device structure.\n", i);
- error = 2;
- ns_init_card_error(card, error);
- return error;
- }
- cards[i] = card;
- spin_lock_init(&card->int_lock);
- spin_lock_init(&card->res_lock);
-
- pci_set_drvdata(pcidev, card);
-
- card->index = i;
- card->atmdev = NULL;
- card->pcidev = pcidev;
- membase = pci_resource_start(pcidev, 1);
- card->membase = ioremap(membase, NS_IOREMAP_SIZE);
- if (!card->membase)
- {
- printk("nicstar%d: can't ioremap() membase.\n",i);
- error = 3;
- ns_init_card_error(card, error);
- return error;
- }
- PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
-
- pci_set_master(pcidev);
-
- if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
- {
- printk("nicstar%d: can't read PCI latency timer.\n", i);
- error = 6;
- ns_init_card_error(card, error);
- return error;
- }
+ int j;
+ struct ns_dev *card = NULL;
+ unsigned char pci_latency;
+ unsigned error;
+ u32 data;
+ u32 u32d[4];
+ u32 ns_cfg_rctsize;
+ int bcount;
+ unsigned long membase;
+
+ error = 0;
+
+ if (pci_enable_device(pcidev)) {
+ printk("nicstar%d: can't enable PCI device\n", i);
+ error = 2;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
+ printk(KERN_WARNING
+ "nicstar%d: No suitable DMA available.\n", i);
+ error = 2;
+ ns_init_card_error(card, error);
+ return error;
+ }
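The hard "#error" on non-32-bit platforms is replaced by declaring the device's addressing capability at probe time, so the probe fails cleanly if the platform cannot provide 32-bit DMA addresses. The check in isolation (hypothetical helper name; the calls are the ones used above):

static int ns_set_dma_mask(struct pci_dev *pcidev)
{
	/* the card only handles 32-bit DMA addresses */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}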
+
+ if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
+ printk
+ ("nicstar%d: can't allocate memory for device structure.\n",
+ i);
+ error = 2;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ cards[i] = card;
+ spin_lock_init(&card->int_lock);
+ spin_lock_init(&card->res_lock);
+
+ pci_set_drvdata(pcidev, card);
+
+ card->index = i;
+ card->atmdev = NULL;
+ card->pcidev = pcidev;
+ membase = pci_resource_start(pcidev, 1);
+ card->membase = ioremap(membase, NS_IOREMAP_SIZE);
+ if (!card->membase) {
+ printk("nicstar%d: can't ioremap() membase.\n", i);
+ error = 3;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
+
+ pci_set_master(pcidev);
+
+ if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
+ printk("nicstar%d: can't read PCI latency timer.\n", i);
+ error = 6;
+ ns_init_card_error(card, error);
+ return error;
+ }
#ifdef NS_PCI_LATENCY
- if (pci_latency < NS_PCI_LATENCY)
- {
- PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
- for (j = 1; j < 4; j++)
- {
- if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
- break;
- }
- if (j == 4)
- {
- printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
- error = 7;
- ns_init_card_error(card, error);
- return error;
- }
- }
+ if (pci_latency < NS_PCI_LATENCY) {
+ PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
+ NS_PCI_LATENCY);
+ for (j = 1; j < 4; j++) {
+ if (pci_write_config_byte
+ (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
+ break;
+ }
+ if (j == 4) {
+ printk
+ ("nicstar%d: can't set PCI latency timer to %d.\n",
+ i, NS_PCI_LATENCY);
+ error = 7;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ }
#endif /* NS_PCI_LATENCY */
-
- /* Clear timer overflow */
- data = readl(card->membase + STAT);
- if (data & NS_STAT_TMROF)
- writel(NS_STAT_TMROF, card->membase + STAT);
-
- /* Software reset */
- writel(NS_CFG_SWRST, card->membase + CFG);
- NS_DELAY;
- writel(0x00000000, card->membase + CFG);
-
- /* PHY reset */
- writel(0x00000008, card->membase + GP);
- NS_DELAY;
- writel(0x00000001, card->membase + GP);
- NS_DELAY;
- while (CMD_BUSY(card));
- writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
- NS_DELAY;
-
- /* Detect PHY type */
- while (CMD_BUSY(card));
- writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
- while (CMD_BUSY(card));
- data = readl(card->membase + DR0);
- switch(data) {
- case 0x00000009:
- printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
- card->max_pcr = ATM_25_PCR;
- while(CMD_BUSY(card));
- writel(0x00000008, card->membase + DR0);
- writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
- /* Clear an eventual pending interrupt */
- writel(NS_STAT_SFBQF, card->membase + STAT);
+
+ /* Clear timer overflow */
+ data = readl(card->membase + STAT);
+ if (data & NS_STAT_TMROF)
+ writel(NS_STAT_TMROF, card->membase + STAT);
+
+ /* Software reset */
+ writel(NS_CFG_SWRST, card->membase + CFG);
+ NS_DELAY;
+ writel(0x00000000, card->membase + CFG);
+
+ /* PHY reset */
+ writel(0x00000008, card->membase + GP);
+ NS_DELAY;
+ writel(0x00000001, card->membase + GP);
+ NS_DELAY;
+ while (CMD_BUSY(card)) ;
+ writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
+ NS_DELAY;
+
+ /* Detect PHY type */
+ while (CMD_BUSY(card)) ;
+ writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ data = readl(card->membase + DR0);
+ switch (data) {
+ case 0x00000009:
+ printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
+ card->max_pcr = ATM_25_PCR;
+ while (CMD_BUSY(card)) ;
+ writel(0x00000008, card->membase + DR0);
+ writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
+ /* Clear an eventual pending interrupt */
+ writel(NS_STAT_SFBQF, card->membase + STAT);
#ifdef PHY_LOOPBACK
- while(CMD_BUSY(card));
- writel(0x00000022, card->membase + DR0);
- writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ writel(0x00000022, card->membase + DR0);
+ writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
#endif /* PHY_LOOPBACK */
- break;
- case 0x00000030:
- case 0x00000031:
- printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
- card->max_pcr = ATM_OC3_PCR;
+ break;
+ case 0x00000030:
+ case 0x00000031:
+ printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
+ card->max_pcr = ATM_OC3_PCR;
#ifdef PHY_LOOPBACK
- while(CMD_BUSY(card));
- writel(0x00000002, card->membase + DR0);
- writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ writel(0x00000002, card->membase + DR0);
+ writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
#endif /* PHY_LOOPBACK */
- break;
- default:
- printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
- error = 8;
- ns_init_card_error(card, error);
- return error;
- }
- writel(0x00000000, card->membase + GP);
-
- /* Determine SRAM size */
- data = 0x76543210;
- ns_write_sram(card, 0x1C003, &data, 1);
- data = 0x89ABCDEF;
- ns_write_sram(card, 0x14003, &data, 1);
- if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
- ns_read_sram(card, 0x1C003) == 0x76543210)
- card->sram_size = 128;
- else
- card->sram_size = 32;
- PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
-
- card->rct_size = NS_MAX_RCTSIZE;
+ break;
+ default:
+ printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
+ error = 8;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ writel(0x00000000, card->membase + GP);
+
+ /* Determine SRAM size */
+ data = 0x76543210;
+ ns_write_sram(card, 0x1C003, &data, 1);
+ data = 0x89ABCDEF;
+ ns_write_sram(card, 0x14003, &data, 1);
+ if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
+ ns_read_sram(card, 0x1C003) == 0x76543210)
+ card->sram_size = 128;
+ else
+ card->sram_size = 32;
+ PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
+
+ card->rct_size = NS_MAX_RCTSIZE;
#if (NS_MAX_RCTSIZE == 4096)
- if (card->sram_size == 128)
- printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
+ if (card->sram_size == 128)
+ printk
+ ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
+ i);
#elif (NS_MAX_RCTSIZE == 16384)
- if (card->sram_size == 32)
- {
- printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
- card->rct_size = 4096;
- }
+ if (card->sram_size == 32) {
+ printk
+ ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
+ i);
+ card->rct_size = 4096;
+ }
#else
#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
#endif
- card->vpibits = NS_VPIBITS;
- if (card->rct_size == 4096)
- card->vcibits = 12 - NS_VPIBITS;
- else /* card->rct_size == 16384 */
- card->vcibits = 14 - NS_VPIBITS;
-
- /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
- if (mac[i] == NULL)
- nicstar_init_eprom(card->membase);
-
- /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
- writel(0x00000000, card->membase + VPM);
-
- /* Initialize TSQ */
- card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
- if (card->tsq.org == NULL)
- {
- printk("nicstar%d: can't allocate TSQ.\n", i);
- error = 10;
- ns_init_card_error(card, error);
- return error;
- }
- card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
- card->tsq.next = card->tsq.base;
- card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
- for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
- ns_tsi_init(card->tsq.base + j);
- writel(0x00000000, card->membase + TSQH);
- writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
- PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base,
- (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
-
- /* Initialize RSQ */
- card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
- if (card->rsq.org == NULL)
- {
- printk("nicstar%d: can't allocate RSQ.\n", i);
- error = 11;
- ns_init_card_error(card, error);
- return error;
- }
- card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
- card->rsq.next = card->rsq.base;
- card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
- for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
- ns_rsqe_init(card->rsq.base + j);
- writel(0x00000000, card->membase + RSQH);
- writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
- PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
-
- /* Initialize SCQ0, the only VBR SCQ used */
- card->scq1 = NULL;
- card->scq2 = NULL;
- card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
- if (card->scq0 == NULL)
- {
- printk("nicstar%d: can't get SCQ0.\n", i);
- error = 12;
- ns_init_card_error(card, error);
- return error;
- }
- u32d[0] = (u32) virt_to_bus(card->scq0->base);
- u32d[1] = (u32) 0x00000000;
- u32d[2] = (u32) 0xffffffff;
- u32d[3] = (u32) 0x00000000;
- ns_write_sram(card, NS_VRSCD0, u32d, 4);
- ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
- ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
- card->scq0->scd = NS_VRSCD0;
- PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
-
- /* Initialize TSTs */
- card->tst_addr = NS_TST0;
- card->tst_free_entries = NS_TST_NUM_ENTRIES;
- data = NS_TST_OPCODE_VARIABLE;
- for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
- ns_write_sram(card, NS_TST0 + j, &data, 1);
- data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
- ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
- for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
- ns_write_sram(card, NS_TST1 + j, &data, 1);
- data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
- ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
- for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
- card->tste2vc[j] = NULL;
- writel(NS_TST0 << 2, card->membase + TSTB);
-
-
- /* Initialize RCT. AAL type is set on opening the VC. */
+ card->vpibits = NS_VPIBITS;
+ if (card->rct_size == 4096)
+ card->vcibits = 12 - NS_VPIBITS;
+ else /* card->rct_size == 16384 */
+ card->vcibits = 14 - NS_VPIBITS;
+
+ /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
+ if (mac[i] == NULL)
+ nicstar_init_eprom(card->membase);
+
+ /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
+ writel(0x00000000, card->membase + VPM);
+
+ /* Initialize TSQ */
+ card->tsq.org = pci_alloc_consistent(card->pcidev,
+ NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+ &card->tsq.dma);
+ if (card->tsq.org == NULL) {
+ printk("nicstar%d: can't allocate TSQ.\n", i);
+ error = 10;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
+ card->tsq.next = card->tsq.base;
+ card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
+ for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
+ ns_tsi_init(card->tsq.base + j);
+ writel(0x00000000, card->membase + TSQH);
+ writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
+ PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
+
+ /* Initialize RSQ */
+ card->rsq.org = pci_alloc_consistent(card->pcidev,
+ NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+ &card->rsq.dma);
+ if (card->rsq.org == NULL) {
+ printk("nicstar%d: can't allocate RSQ.\n", i);
+ error = 11;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
+ card->rsq.next = card->rsq.base;
+ card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
+ for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
+ ns_rsqe_init(card->rsq.base + j);
+ writel(0x00000000, card->membase + RSQH);
+ writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
+ PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
+
+ /* Initialize SCQ0, the only VBR SCQ used */
+ card->scq1 = NULL;
+ card->scq2 = NULL;
+ card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
+ if (card->scq0 == NULL) {
+ printk("nicstar%d: can't get SCQ0.\n", i);
+ error = 12;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
+ u32d[1] = (u32) 0x00000000;
+ u32d[2] = (u32) 0xffffffff;
+ u32d[3] = (u32) 0x00000000;
+ ns_write_sram(card, NS_VRSCD0, u32d, 4);
+ ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
+ ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
+ card->scq0->scd = NS_VRSCD0;
+ PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
+
+ /* Initialize TSTs */
+ card->tst_addr = NS_TST0;
+ card->tst_free_entries = NS_TST_NUM_ENTRIES;
+ data = NS_TST_OPCODE_VARIABLE;
+ for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+ ns_write_sram(card, NS_TST0 + j, &data, 1);
+ data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
+ ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
+ for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+ ns_write_sram(card, NS_TST1 + j, &data, 1);
+ data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
+ ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
+ for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
+ card->tste2vc[j] = NULL;
+ writel(NS_TST0 << 2, card->membase + TSTB);
+
+ /* Initialize RCT. AAL type is set on opening the VC. */
#ifdef RCQ_SUPPORT
- u32d[0] = NS_RCTE_RAWCELLINTEN;
+ u32d[0] = NS_RCTE_RAWCELLINTEN;
#else
- u32d[0] = 0x00000000;
+ u32d[0] = 0x00000000;
#endif /* RCQ_SUPPORT */
- u32d[1] = 0x00000000;
- u32d[2] = 0x00000000;
- u32d[3] = 0xFFFFFFFF;
- for (j = 0; j < card->rct_size; j++)
- ns_write_sram(card, j * 4, u32d, 4);
-
- memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
-
- for (j = 0; j < NS_FRSCD_NUM; j++)
- card->scd2vc[j] = NULL;
-
- /* Initialize buffer levels */
- card->sbnr.min = MIN_SB;
- card->sbnr.init = NUM_SB;
- card->sbnr.max = MAX_SB;
- card->lbnr.min = MIN_LB;
- card->lbnr.init = NUM_LB;
- card->lbnr.max = MAX_LB;
- card->iovnr.min = MIN_IOVB;
- card->iovnr.init = NUM_IOVB;
- card->iovnr.max = MAX_IOVB;
- card->hbnr.min = MIN_HB;
- card->hbnr.init = NUM_HB;
- card->hbnr.max = MAX_HB;
-
- card->sm_handle = 0x00000000;
- card->sm_addr = 0x00000000;
- card->lg_handle = 0x00000000;
- card->lg_addr = 0x00000000;
-
- card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
-
- /* Pre-allocate some huge buffers */
- skb_queue_head_init(&card->hbpool.queue);
- card->hbpool.count = 0;
- for (j = 0; j < NUM_HB; j++)
- {
- struct sk_buff *hb;
- hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
- if (hb == NULL)
- {
- printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
- i, j, NUM_HB);
- error = 13;
- ns_init_card_error(card, error);
- return error;
- }
- NS_SKB_CB(hb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->hbpool.queue, hb);
- card->hbpool.count++;
- }
-
-
- /* Allocate large buffers */
- skb_queue_head_init(&card->lbpool.queue);
- card->lbpool.count = 0; /* Not used */
- for (j = 0; j < NUM_LB; j++)
- {
- struct sk_buff *lb;
- lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
- if (lb == NULL)
- {
- printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
- i, j, NUM_LB);
- error = 14;
- ns_init_card_error(card, error);
- return error;
- }
- NS_SKB_CB(lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, lb);
- skb_reserve(lb, NS_SMBUFSIZE);
- push_rxbufs(card, lb);
- /* Due to the implementation of push_rxbufs() this is 1, not 0 */
- if (j == 1)
- {
- card->rcbuf = lb;
- card->rawch = (u32) virt_to_bus(lb->data);
- }
- }
- /* Test for strange behaviour which leads to crashes */
- if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
- {
- printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
- i, j, bcount);
- error = 14;
- ns_init_card_error(card, error);
- return error;
- }
-
-
- /* Allocate small buffers */
- skb_queue_head_init(&card->sbpool.queue);
- card->sbpool.count = 0; /* Not used */
- for (j = 0; j < NUM_SB; j++)
- {
- struct sk_buff *sb;
- sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
- if (sb == NULL)
- {
- printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
- i, j, NUM_SB);
- error = 15;
- ns_init_card_error(card, error);
- return error;
- }
- NS_SKB_CB(sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, sb);
- skb_reserve(sb, NS_AAL0_HEADER);
- push_rxbufs(card, sb);
- }
- /* Test for strange behaviour which leads to crashes */
- if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
- {
- printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
- i, j, bcount);
- error = 15;
- ns_init_card_error(card, error);
- return error;
- }
-
-
- /* Allocate iovec buffers */
- skb_queue_head_init(&card->iovpool.queue);
- card->iovpool.count = 0;
- for (j = 0; j < NUM_IOVB; j++)
- {
- struct sk_buff *iovb;
- iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
- if (iovb == NULL)
- {
- printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
- i, j, NUM_IOVB);
- error = 16;
- ns_init_card_error(card, error);
- return error;
- }
- NS_SKB_CB(iovb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->iovpool.queue, iovb);
- card->iovpool.count++;
- }
-
- /* Configure NICStAR */
- if (card->rct_size == 4096)
- ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
- else /* (card->rct_size == 16384) */
- ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
-
- card->efbie = 1;
-
- card->intcnt = 0;
- if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
- {
- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
- error = 9;
- ns_init_card_error(card, error);
- return error;
- }
-
- /* Register device */
- card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
- if (card->atmdev == NULL)
- {
- printk("nicstar%d: can't register device.\n", i);
- error = 17;
- ns_init_card_error(card, error);
- return error;
- }
-
- if (ns_parse_mac(mac[i], card->atmdev->esi)) {
- nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
- card->atmdev->esi, 6);
- if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
- nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
- card->atmdev->esi, 6);
- }
- }
-
- printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
-
- card->atmdev->dev_data = card;
- card->atmdev->ci_range.vpi_bits = card->vpibits;
- card->atmdev->ci_range.vci_bits = card->vcibits;
- card->atmdev->link_rate = card->max_pcr;
- card->atmdev->phy = NULL;
+ u32d[1] = 0x00000000;
+ u32d[2] = 0x00000000;
+ u32d[3] = 0xFFFFFFFF;
+ for (j = 0; j < card->rct_size; j++)
+ ns_write_sram(card, j * 4, u32d, 4);
+
+ memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
+
+ for (j = 0; j < NS_FRSCD_NUM; j++)
+ card->scd2vc[j] = NULL;
+
+ /* Initialize buffer levels */
+ card->sbnr.min = MIN_SB;
+ card->sbnr.init = NUM_SB;
+ card->sbnr.max = MAX_SB;
+ card->lbnr.min = MIN_LB;
+ card->lbnr.init = NUM_LB;
+ card->lbnr.max = MAX_LB;
+ card->iovnr.min = MIN_IOVB;
+ card->iovnr.init = NUM_IOVB;
+ card->iovnr.max = MAX_IOVB;
+ card->hbnr.min = MIN_HB;
+ card->hbnr.init = NUM_HB;
+ card->hbnr.max = MAX_HB;
+
+ card->sm_handle = 0x00000000;
+ card->sm_addr = 0x00000000;
+ card->lg_handle = 0x00000000;
+ card->lg_addr = 0x00000000;
+
+ card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
+
+ idr_init(&card->idr);
+
+ /* Pre-allocate some huge buffers */
+ skb_queue_head_init(&card->hbpool.queue);
+ card->hbpool.count = 0;
+ for (j = 0; j < NUM_HB; j++) {
+ struct sk_buff *hb;
+ hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+ if (hb == NULL) {
+ printk
+ ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
+ i, j, NUM_HB);
+ error = 13;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ NS_PRV_BUFTYPE(hb) = BUF_NONE;
+ skb_queue_tail(&card->hbpool.queue, hb);
+ card->hbpool.count++;
+ }
+
+ /* Allocate large buffers */
+ skb_queue_head_init(&card->lbpool.queue);
+ card->lbpool.count = 0; /* Not used */
+ for (j = 0; j < NUM_LB; j++) {
+ struct sk_buff *lb;
+ lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+ if (lb == NULL) {
+ printk
+ ("nicstar%d: can't allocate %dth of %d large buffers.\n",
+ i, j, NUM_LB);
+ error = 14;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ NS_PRV_BUFTYPE(lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, lb);
+ skb_reserve(lb, NS_SMBUFSIZE);
+ push_rxbufs(card, lb);
+ /* Due to the implementation of push_rxbufs() this is 1, not 0 */
+ if (j == 1) {
+ card->rcbuf = lb;
+ card->rawcell = (struct ns_rcqe *) lb->data;
+ card->rawch = NS_PRV_DMA(lb);
+ }
+ }
+ /* Test for strange behaviour which leads to crashes */
+ if ((bcount =
+ ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
+ printk
+ ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
+ i, j, bcount);
+ error = 14;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
+ /* Allocate small buffers */
+ skb_queue_head_init(&card->sbpool.queue);
+ card->sbpool.count = 0; /* Not used */
+ for (j = 0; j < NUM_SB; j++) {
+ struct sk_buff *sb;
+ sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+ if (sb == NULL) {
+ printk
+ ("nicstar%d: can't allocate %dth of %d small buffers.\n",
+ i, j, NUM_SB);
+ error = 15;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ NS_PRV_BUFTYPE(sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, sb);
+ skb_reserve(sb, NS_AAL0_HEADER);
+ push_rxbufs(card, sb);
+ }
+ /* Test for strange behaviour which leads to crashes */
+ if ((bcount =
+ ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
+ printk
+ ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
+ i, j, bcount);
+ error = 15;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
+ /* Allocate iovec buffers */
+ skb_queue_head_init(&card->iovpool.queue);
+ card->iovpool.count = 0;
+ for (j = 0; j < NUM_IOVB; j++) {
+ struct sk_buff *iovb;
+ iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
+ if (iovb == NULL) {
+ printk
+ ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
+ i, j, NUM_IOVB);
+ error = 16;
+ ns_init_card_error(card, error);
+ return error;
+ }
+ NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+ skb_queue_tail(&card->iovpool.queue, iovb);
+ card->iovpool.count++;
+ }
+
+ /* Configure NICStAR */
+ if (card->rct_size == 4096)
+ ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
+ else /* (card->rct_size == 16384) */
+ ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
+
+ card->efbie = 1;
+
+ card->intcnt = 0;
+ if (request_irq
+ (pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED,
+ "nicstar", card) != 0) {
+ printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+ error = 9;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
+ /* Register device */
+ card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
+ if (card->atmdev == NULL) {
+ printk("nicstar%d: can't register device.\n", i);
+ error = 17;
+ ns_init_card_error(card, error);
+ return error;
+ }
+
+ if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+ nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
+ card->atmdev->esi, 6);
+ if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
+ 0) {
+ nicstar_read_eprom(card->membase,
+ NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
+ card->atmdev->esi, 6);
+ }
+ }
+
+ printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
+
+ card->atmdev->dev_data = card;
+ card->atmdev->ci_range.vpi_bits = card->vpibits;
+ card->atmdev->ci_range.vci_bits = card->vcibits;
+ card->atmdev->link_rate = card->max_pcr;
+ card->atmdev->phy = NULL;
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
- if (card->max_pcr == ATM_OC3_PCR)
- suni_init(card->atmdev);
+ if (card->max_pcr == ATM_OC3_PCR)
+ suni_init(card->atmdev);
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
- if (card->max_pcr == ATM_25_PCR)
- idt77105_init(card->atmdev);
+ if (card->max_pcr == ATM_25_PCR)
+ idt77105_init(card->atmdev);
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
- if (card->atmdev->phy && card->atmdev->phy->start)
- card->atmdev->phy->start(card->atmdev);
-
- writel(NS_CFG_RXPATH |
- NS_CFG_SMBUFSIZE |
- NS_CFG_LGBUFSIZE |
- NS_CFG_EFBIE |
- NS_CFG_RSQSIZE |
- NS_CFG_VPIBITS |
- ns_cfg_rctsize |
- NS_CFG_RXINT_NODELAY |
- NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */
- NS_CFG_RSQAFIE |
- NS_CFG_TXEN |
- NS_CFG_TXIE |
- NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
- NS_CFG_PHYIE,
- card->membase + CFG);
-
- num_cards++;
-
- return error;
-}
+ if (card->atmdev->phy && card->atmdev->phy->start)
+ card->atmdev->phy->start(card->atmdev);
+	writel(NS_CFG_RXPATH |
+	       NS_CFG_SMBUFSIZE |
+	       NS_CFG_LGBUFSIZE |
+	       NS_CFG_EFBIE |
+	       NS_CFG_RSQSIZE |
+	       NS_CFG_VPIBITS |
+	       ns_cfg_rctsize |
+	       NS_CFG_RXINT_NODELAY |
+	       NS_CFG_RAWIE |		/* Only enabled if RCQ_SUPPORT */
+	       NS_CFG_RSQAFIE |
+	       NS_CFG_TXEN |
+	       NS_CFG_TXIE |
+	       NS_CFG_TSQFIE_OPT |	/* Only enabled if ENABLE_TSQFIE */
+	       NS_CFG_PHYIE, card->membase + CFG);
+ num_cards++;
-static void __devinit ns_init_card_error(ns_dev *card, int error)
-{
- if (error >= 17)
- {
- writel(0x00000000, card->membase + CFG);
- }
- if (error >= 16)
- {
- struct sk_buff *iovb;
- while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
- dev_kfree_skb_any(iovb);
- }
- if (error >= 15)
- {
- struct sk_buff *sb;
- while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
- dev_kfree_skb_any(sb);
- free_scq(card->scq0, NULL);
- }
- if (error >= 14)
- {
- struct sk_buff *lb;
- while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
- dev_kfree_skb_any(lb);
- }
- if (error >= 13)
- {
- struct sk_buff *hb;
- while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
- dev_kfree_skb_any(hb);
- }
- if (error >= 12)
- {
- kfree(card->rsq.org);
- }
- if (error >= 11)
- {
- kfree(card->tsq.org);
- }
- if (error >= 10)
- {
- free_irq(card->pcidev->irq, card);
- }
- if (error >= 4)
- {
- iounmap(card->membase);
- }
- if (error >= 3)
- {
- pci_disable_device(card->pcidev);
- kfree(card);
- }
+ return error;
}
-
-
-static scq_info *get_scq(int size, u32 scd)
+static void __devinit ns_init_card_error(ns_dev * card, int error)
{
- scq_info *scq;
- int i;
-
- if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
- return NULL;
-
- scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
- if (scq == NULL)
- return NULL;
- scq->org = kmalloc(2 * size, GFP_KERNEL);
- if (scq->org == NULL)
- {
- kfree(scq);
- return NULL;
- }
- scq->skb = kmalloc(sizeof(struct sk_buff *) *
- (size / NS_SCQE_SIZE), GFP_KERNEL);
- if (scq->skb == NULL)
- {
- kfree(scq->org);
- kfree(scq);
- return NULL;
- }
- scq->num_entries = size / NS_SCQE_SIZE;
- scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
- scq->next = scq->base;
- scq->last = scq->base + (scq->num_entries - 1);
- scq->tail = scq->last;
- scq->scd = scd;
- scq->num_entries = size / NS_SCQE_SIZE;
- scq->tbd_count = 0;
- init_waitqueue_head(&scq->scqfull_waitq);
- scq->full = 0;
- spin_lock_init(&scq->lock);
-
- for (i = 0; i < scq->num_entries; i++)
- scq->skb[i] = NULL;
-
- return scq;
+ if (error >= 17) {
+ writel(0x00000000, card->membase + CFG);
+ }
+ if (error >= 16) {
+ struct sk_buff *iovb;
+ while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
+ dev_kfree_skb_any(iovb);
+ }
+ if (error >= 15) {
+ struct sk_buff *sb;
+ while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
+ dev_kfree_skb_any(sb);
+ free_scq(card, card->scq0, NULL);
+ }
+ if (error >= 14) {
+ struct sk_buff *lb;
+ while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
+ dev_kfree_skb_any(lb);
+ }
+ if (error >= 13) {
+ struct sk_buff *hb;
+ while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
+ dev_kfree_skb_any(hb);
+ }
+ if (error >= 12) {
+ kfree(card->rsq.org);
+ }
+ if (error >= 11) {
+ kfree(card->tsq.org);
+ }
+ if (error >= 10) {
+ free_irq(card->pcidev->irq, card);
+ }
+ if (error >= 4) {
+ iounmap(card->membase);
+ }
+ if (error >= 3) {
+ pci_disable_device(card->pcidev);
+ kfree(card);
+ }
}
-
+static scq_info *get_scq(ns_dev *card, int size, u32 scd)
+{
+ scq_info *scq;
+ int i;
+
+ if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
+ return NULL;
+
+ scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
+ if (!scq)
+ return NULL;
+ scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
+ if (!scq->org) {
+ kfree(scq);
+ return NULL;
+ }
+ scq->skb = kmalloc(sizeof(struct sk_buff *) *
+ (size / NS_SCQE_SIZE), GFP_KERNEL);
+ if (!scq->skb) {
+ kfree(scq->org);
+ kfree(scq);
+ return NULL;
+ }
+ scq->num_entries = size / NS_SCQE_SIZE;
+ scq->base = PTR_ALIGN(scq->org, size);
+ scq->next = scq->base;
+ scq->last = scq->base + (scq->num_entries - 1);
+ scq->tail = scq->last;
+ scq->scd = scd;
+ scq->num_entries = size / NS_SCQE_SIZE;
+ scq->tbd_count = 0;
+ init_waitqueue_head(&scq->scqfull_waitq);
+ scq->full = 0;
+ spin_lock_init(&scq->lock);
+
+ for (i = 0; i < scq->num_entries; i++)
+ scq->skb[i] = NULL;
+
+ return scq;
+}
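get_scq() now takes the card so the queue can live in DMA-coherent memory: twice the queue size is allocated, the raw pointer and DMA handle are kept for the later pci_free_consistent(), and the working base is aligned to the queue size as the hardware requires. The allocation pattern in isolation (field names as in the patch; the helper itself is only a sketch):

static int scq_alloc_coherent(ns_dev *card, scq_info *scq, int size)
{
	/* over-allocate so the base can be aligned to 'size' */
	scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
	if (scq->org == NULL)
		return -ENOMEM;
	scq->base = PTR_ALIGN(scq->org, size);	/* hardware alignment */
	return 0;
}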
/* For variable rate SCQ vcc must be NULL */
-static void free_scq(scq_info *scq, struct atm_vcc *vcc)
+static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
{
- int i;
-
- if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
- for (i = 0; i < scq->num_entries; i++)
- {
- if (scq->skb[i] != NULL)
- {
- vcc = ATM_SKB(scq->skb[i])->vcc;
- if (vcc->pop != NULL)
- vcc->pop(vcc, scq->skb[i]);
- else
- dev_kfree_skb_any(scq->skb[i]);
- }
- }
- else /* vcc must be != NULL */
- {
- if (vcc == NULL)
- {
- printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
- for (i = 0; i < scq->num_entries; i++)
- dev_kfree_skb_any(scq->skb[i]);
- }
- else
- for (i = 0; i < scq->num_entries; i++)
- {
- if (scq->skb[i] != NULL)
- {
- if (vcc->pop != NULL)
- vcc->pop(vcc, scq->skb[i]);
- else
- dev_kfree_skb_any(scq->skb[i]);
- }
- }
- }
- kfree(scq->skb);
- kfree(scq->org);
- kfree(scq);
+ int i;
+
+ if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
+ for (i = 0; i < scq->num_entries; i++) {
+ if (scq->skb[i] != NULL) {
+ vcc = ATM_SKB(scq->skb[i])->vcc;
+ if (vcc->pop != NULL)
+ vcc->pop(vcc, scq->skb[i]);
+ else
+ dev_kfree_skb_any(scq->skb[i]);
+ }
+ } else { /* vcc must be != NULL */
+
+ if (vcc == NULL) {
+ printk
+ ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
+ for (i = 0; i < scq->num_entries; i++)
+ dev_kfree_skb_any(scq->skb[i]);
+ } else
+ for (i = 0; i < scq->num_entries; i++) {
+ if (scq->skb[i] != NULL) {
+ if (vcc->pop != NULL)
+ vcc->pop(vcc, scq->skb[i]);
+ else
+ dev_kfree_skb_any(scq->skb[i]);
+ }
+ }
+ }
+ kfree(scq->skb);
+ pci_free_consistent(card->pcidev,
+ 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
+ VBR_SCQSIZE : CBR_SCQSIZE),
+ scq->org, scq->dma);
+ kfree(scq);
}
-
-
/* The handles passed must be pointers to the sk_buff containing the small
or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
+static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
{
- struct ns_skb_cb *cb = NS_SKB_CB(skb);
- u32 handle1, addr1;
- u32 handle2, addr2;
- u32 stat;
- unsigned long flags;
-
- /* *BARF* */
- handle2 = addr2 = 0;
- handle1 = (u32)skb;
- addr1 = (u32)virt_to_bus(skb->data);
+ struct sk_buff *handle1, *handle2;
+ u32 id1 = 0, id2 = 0;
+ u32 addr1, addr2;
+ u32 stat;
+ unsigned long flags;
+ int err;
+
+ /* *BARF* */
+ handle2 = NULL;
+ addr2 = 0;
+ handle1 = skb;
+ addr1 = pci_map_single(card->pcidev,
+ skb->data,
+ (NS_PRV_BUFTYPE(skb) == BUF_SM
+ ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+ PCI_DMA_TODEVICE);
+ NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
#ifdef GENERAL_DEBUG
- if (!addr1)
- printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
+ if (!addr1)
+ printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
+ card->index);
#endif /* GENERAL_DEBUG */
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
- if (cb->buf_type == BUF_SM)
- {
- if (!addr2)
- {
- if (card->sm_addr)
- {
- addr2 = card->sm_addr;
- handle2 = card->sm_handle;
- card->sm_addr = 0x00000000;
- card->sm_handle = 0x00000000;
- }
- else /* (!sm_addr) */
- {
- card->sm_addr = addr1;
- card->sm_handle = handle1;
- }
- }
- }
- else /* buf_type == BUF_LG */
- {
- if (!addr2)
- {
- if (card->lg_addr)
- {
- addr2 = card->lg_addr;
- handle2 = card->lg_handle;
- card->lg_addr = 0x00000000;
- card->lg_handle = 0x00000000;
- }
- else /* (!lg_addr) */
- {
- card->lg_addr = addr1;
- card->lg_handle = handle1;
- }
- }
- }
-
- if (addr2)
- {
- if (cb->buf_type == BUF_SM)
- {
- if (card->sbfqc >= card->sbnr.max)
- {
- skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
- dev_kfree_skb_any((struct sk_buff *) handle1);
- skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
- dev_kfree_skb_any((struct sk_buff *) handle2);
- return;
- }
- else
- card->sbfqc += 2;
- }
- else /* (buf_type == BUF_LG) */
- {
- if (card->lbfqc >= card->lbnr.max)
- {
- skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
- dev_kfree_skb_any((struct sk_buff *) handle1);
- skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
- dev_kfree_skb_any((struct sk_buff *) handle2);
- return;
- }
- else
- card->lbfqc += 2;
- }
-
- spin_lock_irqsave(&card->res_lock, flags);
-
- while (CMD_BUSY(card));
- writel(addr2, card->membase + DR3);
- writel(handle2, card->membase + DR2);
- writel(addr1, card->membase + DR1);
- writel(handle1, card->membase + DR0);
- writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
-
- spin_unlock_irqrestore(&card->res_lock, flags);
-
- XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
- (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
- }
-
- if (!card->efbie && card->sbfqc >= card->sbnr.min &&
- card->lbfqc >= card->lbnr.min)
- {
- card->efbie = 1;
- writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
- }
-
- return;
+ stat = readl(card->membase + STAT);
+ card->sbfqc = ns_stat_sfbqc_get(stat);
+ card->lbfqc = ns_stat_lfbqc_get(stat);
+ if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
+ if (!addr2) {
+ if (card->sm_addr) {
+ addr2 = card->sm_addr;
+ handle2 = card->sm_handle;
+ card->sm_addr = 0x00000000;
+ card->sm_handle = 0x00000000;
+ } else { /* (!sm_addr) */
+
+ card->sm_addr = addr1;
+ card->sm_handle = handle1;
+ }
+ }
+ } else { /* buf_type == BUF_LG */
+
+ if (!addr2) {
+ if (card->lg_addr) {
+ addr2 = card->lg_addr;
+ handle2 = card->lg_handle;
+ card->lg_addr = 0x00000000;
+ card->lg_handle = 0x00000000;
+ } else { /* (!lg_addr) */
+
+ card->lg_addr = addr1;
+ card->lg_handle = handle1;
+ }
+ }
+ }
+
+ if (addr2) {
+ if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
+ if (card->sbfqc >= card->sbnr.max) {
+ skb_unlink(handle1, &card->sbpool.queue);
+ dev_kfree_skb_any(handle1);
+ skb_unlink(handle2, &card->sbpool.queue);
+ dev_kfree_skb_any(handle2);
+ return;
+ } else
+ card->sbfqc += 2;
+ } else { /* (buf_type == BUF_LG) */
+
+ if (card->lbfqc >= card->lbnr.max) {
+ skb_unlink(handle1, &card->lbpool.queue);
+ dev_kfree_skb_any(handle1);
+ skb_unlink(handle2, &card->lbpool.queue);
+ dev_kfree_skb_any(handle2);
+ return;
+ } else
+ card->lbfqc += 2;
+ }
+
+ do {
+ if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
+ printk(KERN_ERR
+ "nicstar%d: no free memory for idr\n",
+ card->index);
+ goto out;
+ }
+
+ if (!id1)
+ err = idr_get_new_above(&card->idr, handle1, 0, &id1);
+
+ if (!id2 && err == 0)
+ err = idr_get_new_above(&card->idr, handle2, 0, &id2);
+
+ } while (err == -EAGAIN);
+
+ if (err)
+ goto out;
+
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ writel(addr2, card->membase + DR3);
+ writel(id2, card->membase + DR2);
+ writel(addr1, card->membase + DR1);
+ writel(id1, card->membase + DR0);
+ writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
+ card->membase + CMD);
+ spin_unlock_irqrestore(&card->res_lock, flags);
+
+ XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
+ card->index,
+ (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
+ addr1, addr2);
+ }
+
+ if (!card->efbie && card->sbfqc >= card->sbnr.min &&
+ card->lbfqc >= card->lbnr.min) {
+ card->efbie = 1;
+ writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
+ card->membase + CFG);
+ }
+
+out:
+ return;
}
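Because the free-buffer queue registers are only 32 bits wide, push_rxbufs() no longer hands the card a kernel pointer; it allocates a small integer id for each sk_buff with the (pre-3.9) idr API, and the receive paths translate it back with idr_find(). The allocation loop in isolation (illustrative helper; the patch open-codes it for two buffers at a time):

static int ns_handle_to_id(ns_dev *card, struct sk_buff *skb)
{
	int id, err;

	do {
		if (!idr_pre_get(&card->idr, GFP_ATOMIC))
			return -ENOMEM;	/* could not grow the idr */
		err = idr_get_new_above(&card->idr, skb, 0, &id);
	} while (err == -EAGAIN);	/* preload consumed, try again */

	return err ? err : id;	/* non-negative id is written to the card */
}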
-
-
static irqreturn_t ns_irq_handler(int irq, void *dev_id)
{
- u32 stat_r;
- ns_dev *card;
- struct atm_dev *dev;
- unsigned long flags;
-
- card = (ns_dev *) dev_id;
- dev = card->atmdev;
- card->intcnt++;
-
- PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
-
- spin_lock_irqsave(&card->int_lock, flags);
-
- stat_r = readl(card->membase + STAT);
-
- /* Transmit Status Indicator has been written to T. S. Queue */
- if (stat_r & NS_STAT_TSIF)
- {
- TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
- process_tsq(card);
- writel(NS_STAT_TSIF, card->membase + STAT);
- }
-
- /* Incomplete CS-PDU has been transmitted */
- if (stat_r & NS_STAT_TXICP)
- {
- writel(NS_STAT_TXICP, card->membase + STAT);
- TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
- card->index);
- }
-
- /* Transmit Status Queue 7/8 full */
- if (stat_r & NS_STAT_TSQF)
- {
- writel(NS_STAT_TSQF, card->membase + STAT);
- PRINTK("nicstar%d: TSQ full.\n", card->index);
- process_tsq(card);
- }
-
- /* Timer overflow */
- if (stat_r & NS_STAT_TMROF)
- {
- writel(NS_STAT_TMROF, card->membase + STAT);
- PRINTK("nicstar%d: Timer overflow.\n", card->index);
- }
-
- /* PHY device interrupt signal active */
- if (stat_r & NS_STAT_PHYI)
- {
- writel(NS_STAT_PHYI, card->membase + STAT);
- PRINTK("nicstar%d: PHY interrupt.\n", card->index);
- if (dev->phy && dev->phy->interrupt) {
- dev->phy->interrupt(dev);
- }
- }
-
- /* Small Buffer Queue is full */
- if (stat_r & NS_STAT_SFBQF)
- {
- writel(NS_STAT_SFBQF, card->membase + STAT);
- printk("nicstar%d: Small free buffer queue is full.\n", card->index);
- }
-
- /* Large Buffer Queue is full */
- if (stat_r & NS_STAT_LFBQF)
- {
- writel(NS_STAT_LFBQF, card->membase + STAT);
- printk("nicstar%d: Large free buffer queue is full.\n", card->index);
- }
-
- /* Receive Status Queue is full */
- if (stat_r & NS_STAT_RSQF)
- {
- writel(NS_STAT_RSQF, card->membase + STAT);
- printk("nicstar%d: RSQ full.\n", card->index);
- process_rsq(card);
- }
-
- /* Complete CS-PDU received */
- if (stat_r & NS_STAT_EOPDU)
- {
- RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
- process_rsq(card);
- writel(NS_STAT_EOPDU, card->membase + STAT);
- }
-
- /* Raw cell received */
- if (stat_r & NS_STAT_RAWCF)
- {
- writel(NS_STAT_RAWCF, card->membase + STAT);
+ u32 stat_r;
+ ns_dev *card;
+ struct atm_dev *dev;
+ unsigned long flags;
+
+ card = (ns_dev *) dev_id;
+ dev = card->atmdev;
+ card->intcnt++;
+
+ PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
+
+ spin_lock_irqsave(&card->int_lock, flags);
+
+ stat_r = readl(card->membase + STAT);
+
+ /* Transmit Status Indicator has been written to T. S. Queue */
+ if (stat_r & NS_STAT_TSIF) {
+ TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
+ process_tsq(card);
+ writel(NS_STAT_TSIF, card->membase + STAT);
+ }
+
+ /* Incomplete CS-PDU has been transmitted */
+ if (stat_r & NS_STAT_TXICP) {
+ writel(NS_STAT_TXICP, card->membase + STAT);
+ TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
+ card->index);
+ }
+
+ /* Transmit Status Queue 7/8 full */
+ if (stat_r & NS_STAT_TSQF) {
+ writel(NS_STAT_TSQF, card->membase + STAT);
+ PRINTK("nicstar%d: TSQ full.\n", card->index);
+ process_tsq(card);
+ }
+
+ /* Timer overflow */
+ if (stat_r & NS_STAT_TMROF) {
+ writel(NS_STAT_TMROF, card->membase + STAT);
+ PRINTK("nicstar%d: Timer overflow.\n", card->index);
+ }
+
+ /* PHY device interrupt signal active */
+ if (stat_r & NS_STAT_PHYI) {
+ writel(NS_STAT_PHYI, card->membase + STAT);
+ PRINTK("nicstar%d: PHY interrupt.\n", card->index);
+ if (dev->phy && dev->phy->interrupt) {
+ dev->phy->interrupt(dev);
+ }
+ }
+
+ /* Small Buffer Queue is full */
+ if (stat_r & NS_STAT_SFBQF) {
+ writel(NS_STAT_SFBQF, card->membase + STAT);
+ printk("nicstar%d: Small free buffer queue is full.\n",
+ card->index);
+ }
+
+ /* Large Buffer Queue is full */
+ if (stat_r & NS_STAT_LFBQF) {
+ writel(NS_STAT_LFBQF, card->membase + STAT);
+ printk("nicstar%d: Large free buffer queue is full.\n",
+ card->index);
+ }
+
+ /* Receive Status Queue is full */
+ if (stat_r & NS_STAT_RSQF) {
+ writel(NS_STAT_RSQF, card->membase + STAT);
+ printk("nicstar%d: RSQ full.\n", card->index);
+ process_rsq(card);
+ }
+
+ /* Complete CS-PDU received */
+ if (stat_r & NS_STAT_EOPDU) {
+ RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
+ process_rsq(card);
+ writel(NS_STAT_EOPDU, card->membase + STAT);
+ }
+
+ /* Raw cell received */
+ if (stat_r & NS_STAT_RAWCF) {
+ writel(NS_STAT_RAWCF, card->membase + STAT);
#ifndef RCQ_SUPPORT
- printk("nicstar%d: Raw cell received and no support yet...\n",
- card->index);
+ printk("nicstar%d: Raw cell received and no support yet...\n",
+ card->index);
#endif /* RCQ_SUPPORT */
- /* NOTE: the following procedure may keep a raw cell pending until the
- next interrupt. As this preliminary support is only meant to
- avoid buffer leakage, this is not an issue. */
- while (readl(card->membase + RAWCT) != card->rawch)
- {
- ns_rcqe *rawcell;
-
- rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
- if (ns_rcqe_islast(rawcell))
- {
- struct sk_buff *oldbuf;
-
- oldbuf = card->rcbuf;
- card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
- card->rawch = (u32) virt_to_bus(card->rcbuf->data);
- recycle_rx_buf(card, oldbuf);
- }
- else
- card->rawch += NS_RCQE_SIZE;
- }
- }
-
- /* Small buffer queue is empty */
- if (stat_r & NS_STAT_SFBQE)
- {
- int i;
- struct sk_buff *sb;
-
- writel(NS_STAT_SFBQE, card->membase + STAT);
- printk("nicstar%d: Small free buffer queue empty.\n",
- card->index);
- for (i = 0; i < card->sbnr.min; i++)
- {
- sb = dev_alloc_skb(NS_SMSKBSIZE);
- if (sb == NULL)
- {
- writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
- card->efbie = 0;
- break;
- }
- NS_SKB_CB(sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, sb);
- skb_reserve(sb, NS_AAL0_HEADER);
- push_rxbufs(card, sb);
- }
- card->sbfqc = i;
- process_rsq(card);
- }
-
- /* Large buffer queue empty */
- if (stat_r & NS_STAT_LFBQE)
- {
- int i;
- struct sk_buff *lb;
-
- writel(NS_STAT_LFBQE, card->membase + STAT);
- printk("nicstar%d: Large free buffer queue empty.\n",
- card->index);
- for (i = 0; i < card->lbnr.min; i++)
- {
- lb = dev_alloc_skb(NS_LGSKBSIZE);
- if (lb == NULL)
- {
- writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
- card->efbie = 0;
- break;
- }
- NS_SKB_CB(lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, lb);
- skb_reserve(lb, NS_SMBUFSIZE);
- push_rxbufs(card, lb);
- }
- card->lbfqc = i;
- process_rsq(card);
- }
-
- /* Receive Status Queue is 7/8 full */
- if (stat_r & NS_STAT_RSQAF)
- {
- writel(NS_STAT_RSQAF, card->membase + STAT);
- RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
- process_rsq(card);
- }
-
- spin_unlock_irqrestore(&card->int_lock, flags);
- PRINTK("nicstar%d: end of interrupt service\n", card->index);
- return IRQ_HANDLED;
+ /* NOTE: the following procedure may keep a raw cell pending until the
+ next interrupt. As this preliminary support is only meant to
+ avoid buffer leakage, this is not an issue. */
+ while (readl(card->membase + RAWCT) != card->rawch) {
+
+ if (ns_rcqe_islast(card->rawcell)) {
+ struct sk_buff *oldbuf;
+
+ oldbuf = card->rcbuf;
+ card->rcbuf = idr_find(&card->idr,
+ ns_rcqe_nextbufhandle(card->rawcell));
+ card->rawch = NS_PRV_DMA(card->rcbuf);
+ card->rawcell = (struct ns_rcqe *)
+ card->rcbuf->data;
+ recycle_rx_buf(card, oldbuf);
+ } else {
+ card->rawch += NS_RCQE_SIZE;
+ card->rawcell++;
+ }
+ }
+ }
+
+ /* Small buffer queue is empty */
+ if (stat_r & NS_STAT_SFBQE) {
+ int i;
+ struct sk_buff *sb;
+
+ writel(NS_STAT_SFBQE, card->membase + STAT);
+ printk("nicstar%d: Small free buffer queue empty.\n",
+ card->index);
+ for (i = 0; i < card->sbnr.min; i++) {
+ sb = dev_alloc_skb(NS_SMSKBSIZE);
+ if (sb == NULL) {
+ writel(readl(card->membase + CFG) &
+ ~NS_CFG_EFBIE, card->membase + CFG);
+ card->efbie = 0;
+ break;
+ }
+ NS_PRV_BUFTYPE(sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, sb);
+ skb_reserve(sb, NS_AAL0_HEADER);
+ push_rxbufs(card, sb);
+ }
+ card->sbfqc = i;
+ process_rsq(card);
+ }
+
+ /* Large buffer queue empty */
+ if (stat_r & NS_STAT_LFBQE) {
+ int i;
+ struct sk_buff *lb;
+
+ writel(NS_STAT_LFBQE, card->membase + STAT);
+ printk("nicstar%d: Large free buffer queue empty.\n",
+ card->index);
+ for (i = 0; i < card->lbnr.min; i++) {
+ lb = dev_alloc_skb(NS_LGSKBSIZE);
+ if (lb == NULL) {
+ writel(readl(card->membase + CFG) &
+ ~NS_CFG_EFBIE, card->membase + CFG);
+ card->efbie = 0;
+ break;
+ }
+ NS_PRV_BUFTYPE(lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, lb);
+ skb_reserve(lb, NS_SMBUFSIZE);
+ push_rxbufs(card, lb);
+ }
+ card->lbfqc = i;
+ process_rsq(card);
+ }
+
+ /* Receive Status Queue is 7/8 full */
+ if (stat_r & NS_STAT_RSQAF) {
+ writel(NS_STAT_RSQAF, card->membase + STAT);
+ RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
+ process_rsq(card);
+ }
+
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ PRINTK("nicstar%d: end of interrupt service\n", card->index);
+ return IRQ_HANDLED;
}
-
-
static int ns_open(struct atm_vcc *vcc)
{
- ns_dev *card;
- vc_map *vc;
- unsigned long tmpl, modl;
- int tcr, tcra; /* target cell rate, and absolute value */
- int n = 0; /* Number of entries in the TST. Initialized to remove
- the compiler warning. */
- u32 u32d[4];
- int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
- warning. How I wish compilers were clever enough to
- tell which variables can truly be used
- uninitialized... */
- int inuse; /* tx or rx vc already in use by another vcc */
- short vpi = vcc->vpi;
- int vci = vcc->vci;
-
- card = (ns_dev *) vcc->dev->dev_data;
- PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
- if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
- {
- PRINTK("nicstar%d: unsupported AAL.\n", card->index);
- return -EINVAL;
- }
-
- vc = &(card->vcmap[vpi << card->vcibits | vci]);
- vcc->dev_data = vc;
-
- inuse = 0;
- if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
- inuse = 1;
- if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
- inuse += 2;
- if (inuse)
- {
- printk("nicstar%d: %s vci already in use.\n", card->index,
- inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
- return -EINVAL;
- }
-
- set_bit(ATM_VF_ADDR,&vcc->flags);
-
- /* NOTE: You are not allowed to modify an open connection's QOS. To change
- that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
- needed to do that. */
- if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
- {
- scq_info *scq;
-
- set_bit(ATM_VF_PARTIAL,&vcc->flags);
- if (vcc->qos.txtp.traffic_class == ATM_CBR)
- {
- /* Check requested cell rate and availability of SCD */
- if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
- vcc->qos.txtp.min_pcr == 0)
- {
- PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
- card->index);
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -EINVAL;
- }
-
- tcr = atm_pcr_goal(&(vcc->qos.txtp));
- tcra = tcr >= 0 ? tcr : -tcr;
-
- PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
- vcc->qos.txtp.max_pcr);
-
- tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES;
- modl = tmpl % card->max_pcr;
-
- n = (int)(tmpl / card->max_pcr);
- if (tcr > 0)
- {
- if (modl > 0) n++;
- }
- else if (tcr == 0)
- {
- if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
- {
- PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -EINVAL;
- }
- }
-
- if (n == 0)
- {
- printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -EINVAL;
- }
-
- if (n > (card->tst_free_entries - NS_TST_RESERVED))
- {
- PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -EINVAL;
- }
- else
- card->tst_free_entries -= n;
-
- XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
- for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
- {
- if (card->scd2vc[frscdi] == NULL)
- {
- card->scd2vc[frscdi] = vc;
- break;
- }
- }
- if (frscdi == NS_FRSCD_NUM)
- {
- PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
- card->tst_free_entries += n;
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -EBUSY;
- }
-
- vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
-
- scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
- if (scq == NULL)
- {
- PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
- card->scd2vc[frscdi] = NULL;
- card->tst_free_entries += n;
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
- return -ENOMEM;
- }
- vc->scq = scq;
- u32d[0] = (u32) virt_to_bus(scq->base);
- u32d[1] = (u32) 0x00000000;
- u32d[2] = (u32) 0xffffffff;
- u32d[3] = (u32) 0x00000000;
- ns_write_sram(card, vc->cbr_scd, u32d, 4);
-
- fill_tst(card, n, vc);
- }
- else if (vcc->qos.txtp.traffic_class == ATM_UBR)
- {
- vc->cbr_scd = 0x00000000;
- vc->scq = card->scq0;
- }
-
- if (vcc->qos.txtp.traffic_class != ATM_NONE)
- {
- vc->tx = 1;
- vc->tx_vcc = vcc;
- vc->tbd_count = 0;
- }
- if (vcc->qos.rxtp.traffic_class != ATM_NONE)
- {
- u32 status;
-
- vc->rx = 1;
- vc->rx_vcc = vcc;
- vc->rx_iov = NULL;
-
- /* Open the connection in hardware */
- if (vcc->qos.aal == ATM_AAL5)
- status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
- else /* vcc->qos.aal == ATM_AAL0 */
- status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
+ ns_dev *card;
+ vc_map *vc;
+ unsigned long tmpl, modl;
+ int tcr, tcra; /* target cell rate, and absolute value */
+ int n = 0; /* Number of entries in the TST. Initialized to remove
+ the compiler warning. */
+ u32 u32d[4];
+ int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
+ warning. How I wish compilers were clever enough to
+ tell which variables can truly be used
+ uninitialized... */
+ int inuse; /* tx or rx vc already in use by another vcc */
+ short vpi = vcc->vpi;
+ int vci = vcc->vci;
+
+ card = (ns_dev *) vcc->dev->dev_data;
+ PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
+ vci);
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+ PRINTK("nicstar%d: unsupported AAL.\n", card->index);
+ return -EINVAL;
+ }
+
+ vc = &(card->vcmap[vpi << card->vcibits | vci]);
+ vcc->dev_data = vc;
+
+ inuse = 0;
+ if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
+ inuse = 1;
+ if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
+ inuse += 2;
+ if (inuse) {
+ printk("nicstar%d: %s vci already in use.\n", card->index,
+ inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
+ return -EINVAL;
+ }
+
+ set_bit(ATM_VF_ADDR, &vcc->flags);
+
+ /* NOTE: You are not allowed to modify an open connection's QOS. To change
+ that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
+ needed to do that. */
+ if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
+ scq_info *scq;
+
+ set_bit(ATM_VF_PARTIAL, &vcc->flags);
+ if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+ /* Check requested cell rate and availability of SCD */
+ if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
+ && vcc->qos.txtp.min_pcr == 0) {
+ PRINTK
+ ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
+ card->index);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -EINVAL;
+ }
+
+ tcr = atm_pcr_goal(&(vcc->qos.txtp));
+ tcra = tcr >= 0 ? tcr : -tcr;
+
+ PRINTK("nicstar%d: target cell rate = %d.\n",
+ card->index, vcc->qos.txtp.max_pcr);
+
+ tmpl =
+ (unsigned long)tcra *(unsigned long)
+ NS_TST_NUM_ENTRIES;
+ modl = tmpl % card->max_pcr;
+
+ n = (int)(tmpl / card->max_pcr);
+ if (tcr > 0) {
+ if (modl > 0)
+ n++;
+ } else if (tcr == 0) {
+ if ((n =
+ (card->tst_free_entries -
+ NS_TST_RESERVED)) <= 0) {
+ PRINTK
+ ("nicstar%d: no CBR bandwidth free.\n",
+ card->index);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -EINVAL;
+ }
+ }
+
+ if (n == 0) {
+ printk
+ ("nicstar%d: selected bandwidth < granularity.\n",
+ card->index);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -EINVAL;
+ }
+
+ if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
+ PRINTK
+ ("nicstar%d: not enough free CBR bandwidth.\n",
+ card->index);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -EINVAL;
+ } else
+ card->tst_free_entries -= n;
+
+ XPRINTK("nicstar%d: writing %d tst entries.\n",
+ card->index, n);
+ for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
+ if (card->scd2vc[frscdi] == NULL) {
+ card->scd2vc[frscdi] = vc;
+ break;
+ }
+ }
+ if (frscdi == NS_FRSCD_NUM) {
+ PRINTK
+ ("nicstar%d: no SCD available for CBR channel.\n",
+ card->index);
+ card->tst_free_entries += n;
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -EBUSY;
+ }
+
+ vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
+
+ scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
+ if (scq == NULL) {
+ PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
+ card->index);
+ card->scd2vc[frscdi] = NULL;
+ card->tst_free_entries += n;
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
+ return -ENOMEM;
+ }
+ vc->scq = scq;
+ u32d[0] = scq_virt_to_bus(scq, scq->base);
+ u32d[1] = (u32) 0x00000000;
+ u32d[2] = (u32) 0xffffffff;
+ u32d[3] = (u32) 0x00000000;
+ ns_write_sram(card, vc->cbr_scd, u32d, 4);
+
+ fill_tst(card, n, vc);
+ } else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
+ vc->cbr_scd = 0x00000000;
+ vc->scq = card->scq0;
+ }
+
+ if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+ vc->tx = 1;
+ vc->tx_vcc = vcc;
+ vc->tbd_count = 0;
+ }
+ if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
+ u32 status;
+
+ vc->rx = 1;
+ vc->rx_vcc = vcc;
+ vc->rx_iov = NULL;
+
+ /* Open the connection in hardware */
+ if (vcc->qos.aal == ATM_AAL5)
+ status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
+ else /* vcc->qos.aal == ATM_AAL0 */
+ status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
#ifdef RCQ_SUPPORT
- status |= NS_RCTE_RAWCELLINTEN;
+ status |= NS_RCTE_RAWCELLINTEN;
#endif /* RCQ_SUPPORT */
- ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
- NS_RCT_ENTRY_SIZE, &status, 1);
- }
-
- }
-
- set_bit(ATM_VF_READY,&vcc->flags);
- return 0;
-}
+ ns_write_sram(card,
+ NS_RCT +
+ (vpi << card->vcibits | vci) *
+ NS_RCT_ENTRY_SIZE, &status, 1);
+ }
+ }
+ set_bit(ATM_VF_READY, &vcc->flags);
+ return 0;
+}
static void ns_close(struct atm_vcc *vcc)
{
- vc_map *vc;
- ns_dev *card;
- u32 data;
- int i;
-
- vc = vcc->dev_data;
- card = vcc->dev->dev_data;
- PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
- (int) vcc->vpi, vcc->vci);
-
- clear_bit(ATM_VF_READY,&vcc->flags);
-
- if (vcc->qos.rxtp.traffic_class != ATM_NONE)
- {
- u32 addr;
- unsigned long flags;
-
- addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
- spin_lock_irqsave(&card->res_lock, flags);
- while(CMD_BUSY(card));
- writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
- spin_unlock_irqrestore(&card->res_lock, flags);
-
- vc->rx = 0;
- if (vc->rx_iov != NULL)
- {
- struct sk_buff *iovb;
- u32 stat;
-
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
- card->index);
- iovb = vc->rx_iov;
- recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
- NS_SKB(iovb)->iovcnt);
- NS_SKB(iovb)->iovcnt = 0;
- NS_SKB(iovb)->vcc = NULL;
- spin_lock_irqsave(&card->int_lock, flags);
- recycle_iov_buf(card, iovb);
- spin_unlock_irqrestore(&card->int_lock, flags);
- vc->rx_iov = NULL;
- }
- }
-
- if (vcc->qos.txtp.traffic_class != ATM_NONE)
- {
- vc->tx = 0;
- }
-
- if (vcc->qos.txtp.traffic_class == ATM_CBR)
- {
- unsigned long flags;
- ns_scqe *scqep;
- scq_info *scq;
-
- scq = vc->scq;
-
- for (;;)
- {
- spin_lock_irqsave(&scq->lock, flags);
- scqep = scq->next;
- if (scqep == scq->base)
- scqep = scq->last;
- else
- scqep--;
- if (scqep == scq->tail)
- {
- spin_unlock_irqrestore(&scq->lock, flags);
- break;
- }
- /* If the last entry is not a TSR, place one in the SCQ in order to
- be able to completely drain it and then close. */
- if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
- {
- ns_scqe tsr;
- u32 scdi, scqi;
- u32 data;
- int index;
-
- tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
- scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
- scqi = scq->next - scq->base;
- tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
- tsr.word_3 = 0x00000000;
- tsr.word_4 = 0x00000000;
- *scq->next = tsr;
- index = (int) scqi;
- scq->skb[index] = NULL;
- if (scq->next == scq->last)
- scq->next = scq->base;
- else
- scq->next++;
- data = (u32) virt_to_bus(scq->next);
- ns_write_sram(card, scq->scd, &data, 1);
- }
- spin_unlock_irqrestore(&scq->lock, flags);
- schedule();
- }
-
- /* Free all TST entries */
- data = NS_TST_OPCODE_VARIABLE;
- for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
- {
- if (card->tste2vc[i] == vc)
- {
- ns_write_sram(card, card->tst_addr + i, &data, 1);
- card->tste2vc[i] = NULL;
- card->tst_free_entries++;
- }
- }
-
- card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
- free_scq(vc->scq, vcc);
- }
-
- /* remove all references to vcc before deleting it */
- if (vcc->qos.txtp.traffic_class != ATM_NONE)
- {
- unsigned long flags;
- scq_info *scq = card->scq0;
-
- spin_lock_irqsave(&scq->lock, flags);
-
- for(i = 0; i < scq->num_entries; i++) {
- if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
- ATM_SKB(scq->skb[i])->vcc = NULL;
- atm_return(vcc, scq->skb[i]->truesize);
- PRINTK("nicstar: deleted pending vcc mapping\n");
- }
- }
-
- spin_unlock_irqrestore(&scq->lock, flags);
- }
-
- vcc->dev_data = NULL;
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
- clear_bit(ATM_VF_ADDR,&vcc->flags);
+ vc_map *vc;
+ ns_dev *card;
+ u32 data;
+ int i;
+
+ vc = vcc->dev_data;
+ card = vcc->dev->dev_data;
+ PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
+ (int)vcc->vpi, vcc->vci);
+
+ clear_bit(ATM_VF_READY, &vcc->flags);
+
+ if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
+ u32 addr;
+ unsigned long flags;
+
+ addr =
+ NS_RCT +
+ (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
+ card->membase + CMD);
+ spin_unlock_irqrestore(&card->res_lock, flags);
+
+ vc->rx = 0;
+ if (vc->rx_iov != NULL) {
+ struct sk_buff *iovb;
+ u32 stat;
+
+ stat = readl(card->membase + STAT);
+ card->sbfqc = ns_stat_sfbqc_get(stat);
+ card->lbfqc = ns_stat_lfbqc_get(stat);
+
+ PRINTK
+ ("nicstar%d: closing a VC with pending rx buffers.\n",
+ card->index);
+ iovb = vc->rx_iov;
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ NS_PRV_IOVCNT(iovb) = 0;
+ spin_lock_irqsave(&card->int_lock, flags);
+ recycle_iov_buf(card, iovb);
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ vc->rx_iov = NULL;
+ }
+ }
+
+ if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+ vc->tx = 0;
+ }
+
+ if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+ unsigned long flags;
+ ns_scqe *scqep;
+ scq_info *scq;
+
+ scq = vc->scq;
+
+ for (;;) {
+ spin_lock_irqsave(&scq->lock, flags);
+ scqep = scq->next;
+ if (scqep == scq->base)
+ scqep = scq->last;
+ else
+ scqep--;
+ if (scqep == scq->tail) {
+ spin_unlock_irqrestore(&scq->lock, flags);
+ break;
+ }
+ /* If the last entry is not a TSR, place one in the SCQ in order to
+ be able to completely drain it and then close. */
+ if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
+ ns_scqe tsr;
+ u32 scdi, scqi;
+ u32 data;
+ int index;
+
+ tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
+ scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
+ scqi = scq->next - scq->base;
+ tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
+ tsr.word_3 = 0x00000000;
+ tsr.word_4 = 0x00000000;
+ *scq->next = tsr;
+ index = (int)scqi;
+ scq->skb[index] = NULL;
+ if (scq->next == scq->last)
+ scq->next = scq->base;
+ else
+ scq->next++;
+ data = scq_virt_to_bus(scq, scq->next);
+ ns_write_sram(card, scq->scd, &data, 1);
+ }
+ spin_unlock_irqrestore(&scq->lock, flags);
+ schedule();
+ }
+
+ /* Free all TST entries */
+ data = NS_TST_OPCODE_VARIABLE;
+ for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
+ if (card->tste2vc[i] == vc) {
+ ns_write_sram(card, card->tst_addr + i, &data,
+ 1);
+ card->tste2vc[i] = NULL;
+ card->tst_free_entries++;
+ }
+ }
+
+ card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
+ free_scq(card, vc->scq, vcc);
+ }
+
+ /* remove all references to vcc before deleting it */
+ if (vcc->qos.txtp.traffic_class != ATM_NONE) {
+ unsigned long flags;
+ scq_info *scq = card->scq0;
+
+ spin_lock_irqsave(&scq->lock, flags);
+
+ for (i = 0; i < scq->num_entries; i++) {
+ if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
+ ATM_SKB(scq->skb[i])->vcc = NULL;
+ atm_return(vcc, scq->skb[i]->truesize);
+ PRINTK
+ ("nicstar: deleted pending vcc mapping\n");
+ }
+ }
+
+ spin_unlock_irqrestore(&scq->lock, flags);
+ }
+
+ vcc->dev_data = NULL;
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
+ clear_bit(ATM_VF_ADDR, &vcc->flags);
#ifdef RX_DEBUG
- {
- u32 stat, cfg;
- stat = readl(card->membase + STAT);
- cfg = readl(card->membase + CFG);
- printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
- printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n",
- (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
- readl(card->membase + TSQT));
- printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n",
- (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
- readl(card->membase + RSQT));
- printk("Empty free buffer queue interrupt %s \n",
- card->efbie ? "enabled" : "disabled");
- printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
- ns_stat_sfbqc_get(stat), card->sbpool.count,
- ns_stat_lfbqc_get(stat), card->lbpool.count);
- printk("hbpool.count = %d iovpool.count = %d \n",
- card->hbpool.count, card->iovpool.count);
- }
+ {
+ u32 stat, cfg;
+ stat = readl(card->membase + STAT);
+ cfg = readl(card->membase + CFG);
+ printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
+ printk
+ ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n",
+ card->tsq.base, card->tsq.next,
+ card->tsq.last, readl(card->membase + TSQT));
+ printk
+ ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n",
+ card->rsq.base, card->rsq.next,
+ card->rsq.last, readl(card->membase + RSQT));
+ printk("Empty free buffer queue interrupt %s \n",
+ card->efbie ? "enabled" : "disabled");
+ printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
+ ns_stat_sfbqc_get(stat), card->sbpool.count,
+ ns_stat_lfbqc_get(stat), card->lbpool.count);
+ printk("hbpool.count = %d iovpool.count = %d \n",
+ card->hbpool.count, card->iovpool.count);
+ }
#endif /* RX_DEBUG */
}
-
-
-static void fill_tst(ns_dev *card, int n, vc_map *vc)
+static void fill_tst(ns_dev * card, int n, vc_map * vc)
{
- u32 new_tst;
- unsigned long cl;
- int e, r;
- u32 data;
-
- /* It would be very complicated to keep the two TSTs synchronized while
- assuring that writes are only made to the inactive TST. So, for now I
- will use only one TST. If problems occur, I will change this again */
-
- new_tst = card->tst_addr;
-
- /* Fill procedure */
-
- for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
- {
- if (card->tste2vc[e] == NULL)
- break;
- }
- if (e == NS_TST_NUM_ENTRIES) {
- printk("nicstar%d: No free TST entries found. \n", card->index);
- return;
- }
-
- r = n;
- cl = NS_TST_NUM_ENTRIES;
- data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
-
- while (r > 0)
- {
- if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL)
- {
- card->tste2vc[e] = vc;
- ns_write_sram(card, new_tst + e, &data, 1);
- cl -= NS_TST_NUM_ENTRIES;
- r--;
- }
-
- if (++e == NS_TST_NUM_ENTRIES) {
- e = 0;
- }
- cl += n;
- }
-
- /* End of fill procedure */
-
- data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
- ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
- ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
- card->tst_addr = new_tst;
+ u32 new_tst;
+ unsigned long cl;
+ int e, r;
+ u32 data;
+
+ /* It would be very complicated to keep the two TSTs synchronized while
+ assuring that writes are only made to the inactive TST. So, for now I
+ will use only one TST. If problems occur, I will change this again */
+
+ new_tst = card->tst_addr;
+
+ /* Fill procedure */
+
+ for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
+ if (card->tste2vc[e] == NULL)
+ break;
+ }
+ if (e == NS_TST_NUM_ENTRIES) {
+ printk("nicstar%d: No free TST entries found. \n", card->index);
+ return;
+ }
+
+ r = n;
+ cl = NS_TST_NUM_ENTRIES;
+ data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
+
+ while (r > 0) {
+ if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
+ card->tste2vc[e] = vc;
+ ns_write_sram(card, new_tst + e, &data, 1);
+ cl -= NS_TST_NUM_ENTRIES;
+ r--;
+ }
+
+ if (++e == NS_TST_NUM_ENTRIES) {
+ e = 0;
+ }
+ cl += n;
+ }
+
+ /* End of fill procedure */
+
+ data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
+ ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
+ ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
+ card->tst_addr = new_tst;
}
-
-
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
- ns_dev *card;
- vc_map *vc;
- scq_info *scq;
- unsigned long buflen;
- ns_scqe scqe;
- u32 flags; /* TBD flags, not CPU flags */
-
- card = vcc->dev->dev_data;
- TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
- if ((vc = (vc_map *) vcc->dev_data) == NULL)
- {
- printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
- atomic_inc(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (!vc->tx)
- {
- printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
- atomic_inc(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
- {
- printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
- atomic_inc(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (skb_shinfo(skb)->nr_frags != 0)
- {
- printk("nicstar%d: No scatter-gather yet.\n", card->index);
- atomic_inc(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- ATM_SKB(skb)->vcc = vcc;
-
- if (vcc->qos.aal == ATM_AAL5)
- {
- buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
- flags = NS_TBD_AAL5;
- scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
- scqe.word_3 = cpu_to_le32((u32) skb->len);
- scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
- ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0);
- flags |= NS_TBD_EOPDU;
- }
- else /* (vcc->qos.aal == ATM_AAL0) */
- {
- buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
- flags = NS_TBD_AAL0;
- scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
- scqe.word_3 = cpu_to_le32(0x00000000);
- if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
- flags |= NS_TBD_EOPDU;
- scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
- /* Force the VPI/VCI to be the same as in VCC struct */
- scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
- ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
- NS_TBD_VC_MASK);
- }
-
- if (vcc->qos.txtp.traffic_class == ATM_CBR)
- {
- scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
- scq = ((vc_map *) vcc->dev_data)->scq;
- }
- else
- {
- scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
- scq = card->scq0;
- }
-
- if (push_scqe(card, vc, scq, &scqe, skb) != 0)
- {
- atomic_inc(&vcc->stats->tx_err);
- dev_kfree_skb_any(skb);
- return -EIO;
- }
- atomic_inc(&vcc->stats->tx);
-
- return 0;
-}
-
+ ns_dev *card;
+ vc_map *vc;
+ scq_info *scq;
+ unsigned long buflen;
+ ns_scqe scqe;
+ u32 flags; /* TBD flags, not CPU flags */
+
+ card = vcc->dev->dev_data;
+ TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
+ if ((vc = (vc_map *) vcc->dev_data) == NULL) {
+ printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
+ card->index);
+ atomic_inc(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+ if (!vc->tx) {
+ printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
+ card->index);
+ atomic_inc(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
-static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
- struct sk_buff *skb)
-{
- unsigned long flags;
- ns_scqe tsr;
- u32 scdi, scqi;
- int scq_is_vbr;
- u32 data;
- int index;
-
- spin_lock_irqsave(&scq->lock, flags);
- while (scq->tail == scq->next)
- {
- if (in_interrupt()) {
- spin_unlock_irqrestore(&scq->lock, flags);
- printk("nicstar%d: Error pushing TBD.\n", card->index);
- return 1;
- }
-
- scq->full = 1;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
-
- if (scq->full) {
- spin_unlock_irqrestore(&scq->lock, flags);
- printk("nicstar%d: Timeout pushing TBD.\n", card->index);
- return 1;
- }
- }
- *scq->next = *tbd;
- index = (int) (scq->next - scq->base);
- scq->skb[index] = skb;
- XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
- card->index, (u32) skb, index);
- XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
- card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
- le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
- (u32) scq->next);
- if (scq->next == scq->last)
- scq->next = scq->base;
- else
- scq->next++;
-
- vc->tbd_count++;
- if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
- {
- scq->tbd_count++;
- scq_is_vbr = 1;
- }
- else
- scq_is_vbr = 0;
-
- if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
- {
- int has_run = 0;
-
- while (scq->tail == scq->next)
- {
- if (in_interrupt()) {
- data = (u32) virt_to_bus(scq->next);
- ns_write_sram(card, scq->scd, &data, 1);
- spin_unlock_irqrestore(&scq->lock, flags);
- printk("nicstar%d: Error pushing TSR.\n", card->index);
- return 0;
- }
-
- scq->full = 1;
- if (has_run++) break;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
- }
-
- if (!scq->full)
- {
- tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
- if (scq_is_vbr)
- scdi = NS_TSR_SCDISVBR;
- else
- scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
- scqi = scq->next - scq->base;
- tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
- tsr.word_3 = 0x00000000;
- tsr.word_4 = 0x00000000;
-
- *scq->next = tsr;
- index = (int) scqi;
- scq->skb[index] = NULL;
- XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
- card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2),
- le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4),
- (u32) scq->next);
- if (scq->next == scq->last)
- scq->next = scq->base;
- else
- scq->next++;
- vc->tbd_count = 0;
- scq->tbd_count = 0;
- }
- else
- PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index);
- }
- data = (u32) virt_to_bus(scq->next);
- ns_write_sram(card, scq->scd, &data, 1);
-
- spin_unlock_irqrestore(&scq->lock, flags);
-
- return 0;
-}
+ if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
+ printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
+ card->index);
+ atomic_inc(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ printk("nicstar%d: No scatter-gather yet.\n", card->index);
+ atomic_inc(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ ATM_SKB(skb)->vcc = vcc;
+
+ NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+
+ if (vcc->qos.aal == ATM_AAL5) {
+ buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
+ flags = NS_TBD_AAL5;
+ scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
+ scqe.word_3 = cpu_to_le32(skb->len);
+ scqe.word_4 =
+ ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
+ ATM_SKB(skb)->
+ atm_options & ATM_ATMOPT_CLP ? 1 : 0);
+ flags |= NS_TBD_EOPDU;
+ } else { /* (vcc->qos.aal == ATM_AAL0) */
+
+ buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
+ flags = NS_TBD_AAL0;
+ scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
+ scqe.word_3 = cpu_to_le32(0x00000000);
+ if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
+ flags |= NS_TBD_EOPDU;
+ scqe.word_4 =
+ cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
+ /* Force the VPI/VCI to be the same as in VCC struct */
+ scqe.word_4 |=
+ cpu_to_le32((((u32) vcc->
+ vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->
+ vci) <<
+ NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK);
+ }
+
+ if (vcc->qos.txtp.traffic_class == ATM_CBR) {
+ scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
+ scq = ((vc_map *) vcc->dev_data)->scq;
+ } else {
+ scqe.word_1 =
+ ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
+ scq = card->scq0;
+ }
+
+ if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+ atomic_inc(&vcc->stats->tx_err);
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+ atomic_inc(&vcc->stats->tx);
+ return 0;
+}
-static void process_tsq(ns_dev *card)
+static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
+ struct sk_buff *skb)
{
- u32 scdi;
- scq_info *scq;
- ns_tsi *previous = NULL, *one_ahead, *two_ahead;
- int serviced_entries; /* flag indicating at least on entry was serviced */
-
- serviced_entries = 0;
-
- if (card->tsq.next == card->tsq.last)
- one_ahead = card->tsq.base;
- else
- one_ahead = card->tsq.next + 1;
-
- if (one_ahead == card->tsq.last)
- two_ahead = card->tsq.base;
- else
- two_ahead = one_ahead + 1;
-
- while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
- !ns_tsi_isempty(two_ahead))
- /* At most two empty, as stated in the 77201 errata */
- {
- serviced_entries = 1;
-
- /* Skip the one or two possible empty entries */
- while (ns_tsi_isempty(card->tsq.next)) {
- if (card->tsq.next == card->tsq.last)
- card->tsq.next = card->tsq.base;
- else
- card->tsq.next++;
- }
-
- if (!ns_tsi_tmrof(card->tsq.next))
- {
- scdi = ns_tsi_getscdindex(card->tsq.next);
- if (scdi == NS_TSI_SCDISVBR)
- scq = card->scq0;
- else
- {
- if (card->scd2vc[scdi] == NULL)
- {
- printk("nicstar%d: could not find VC from SCD index.\n",
- card->index);
- ns_tsi_init(card->tsq.next);
- return;
- }
- scq = card->scd2vc[scdi]->scq;
- }
- drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
- scq->full = 0;
- wake_up_interruptible(&(scq->scqfull_waitq));
- }
-
- ns_tsi_init(card->tsq.next);
- previous = card->tsq.next;
- if (card->tsq.next == card->tsq.last)
- card->tsq.next = card->tsq.base;
- else
- card->tsq.next++;
-
- if (card->tsq.next == card->tsq.last)
- one_ahead = card->tsq.base;
- else
- one_ahead = card->tsq.next + 1;
-
- if (one_ahead == card->tsq.last)
- two_ahead = card->tsq.base;
- else
- two_ahead = one_ahead + 1;
- }
-
- if (serviced_entries) {
- writel((((u32) previous) - ((u32) card->tsq.base)),
- card->membase + TSQH);
- }
+ unsigned long flags;
+ ns_scqe tsr;
+ u32 scdi, scqi;
+ int scq_is_vbr;
+ u32 data;
+ int index;
+
+ spin_lock_irqsave(&scq->lock, flags);
+ while (scq->tail == scq->next) {
+ if (in_interrupt()) {
+ spin_unlock_irqrestore(&scq->lock, flags);
+ printk("nicstar%d: Error pushing TBD.\n", card->index);
+ return 1;
+ }
+
+ scq->full = 1;
+ spin_unlock_irqrestore(&scq->lock, flags);
+ interruptible_sleep_on_timeout(&scq->scqfull_waitq,
+ SCQFULL_TIMEOUT);
+ spin_lock_irqsave(&scq->lock, flags);
+
+ if (scq->full) {
+ spin_unlock_irqrestore(&scq->lock, flags);
+ printk("nicstar%d: Timeout pushing TBD.\n",
+ card->index);
+ return 1;
+ }
+ }
+ *scq->next = *tbd;
+ index = (int)(scq->next - scq->base);
+ scq->skb[index] = skb;
+ XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
+ card->index, skb, index);
+ XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
+ card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
+ le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
+ scq->next);
+ if (scq->next == scq->last)
+ scq->next = scq->base;
+ else
+ scq->next++;
+
+ vc->tbd_count++;
+ if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
+ scq->tbd_count++;
+ scq_is_vbr = 1;
+ } else
+ scq_is_vbr = 0;
+
+ if (vc->tbd_count >= MAX_TBD_PER_VC
+ || scq->tbd_count >= MAX_TBD_PER_SCQ) {
+ int has_run = 0;
+
+ while (scq->tail == scq->next) {
+ if (in_interrupt()) {
+ data = scq_virt_to_bus(scq, scq->next);
+ ns_write_sram(card, scq->scd, &data, 1);
+ spin_unlock_irqrestore(&scq->lock, flags);
+ printk("nicstar%d: Error pushing TSR.\n",
+ card->index);
+ return 0;
+ }
+
+ scq->full = 1;
+ if (has_run++)
+ break;
+ spin_unlock_irqrestore(&scq->lock, flags);
+ interruptible_sleep_on_timeout(&scq->scqfull_waitq,
+ SCQFULL_TIMEOUT);
+ spin_lock_irqsave(&scq->lock, flags);
+ }
+
+ if (!scq->full) {
+ tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
+ if (scq_is_vbr)
+ scdi = NS_TSR_SCDISVBR;
+ else
+ scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
+ scqi = scq->next - scq->base;
+ tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
+ tsr.word_3 = 0x00000000;
+ tsr.word_4 = 0x00000000;
+
+ *scq->next = tsr;
+ index = (int)scqi;
+ scq->skb[index] = NULL;
+ XPRINTK
+ ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
+ card->index, le32_to_cpu(tsr.word_1),
+ le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
+ le32_to_cpu(tsr.word_4), scq->next);
+ if (scq->next == scq->last)
+ scq->next = scq->base;
+ else
+ scq->next++;
+ vc->tbd_count = 0;
+ scq->tbd_count = 0;
+ } else
+ PRINTK("nicstar%d: Timeout pushing TSR.\n",
+ card->index);
+ }
+ data = scq_virt_to_bus(scq, scq->next);
+ ns_write_sram(card, scq->scd, &data, 1);
+
+ spin_unlock_irqrestore(&scq->lock, flags);
+
+ return 0;
}
-
-
-static void drain_scq(ns_dev *card, scq_info *scq, int pos)
+static void process_tsq(ns_dev * card)
{
- struct atm_vcc *vcc;
- struct sk_buff *skb;
- int i;
- unsigned long flags;
-
- XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
- card->index, (u32) scq, pos);
- if (pos >= scq->num_entries)
- {
- printk("nicstar%d: Bad index on drain_scq().\n", card->index);
- return;
- }
-
- spin_lock_irqsave(&scq->lock, flags);
- i = (int) (scq->tail - scq->base);
- if (++i == scq->num_entries)
- i = 0;
- while (i != pos)
- {
- skb = scq->skb[i];
- XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
- card->index, (u32) skb, i);
- if (skb != NULL)
- {
- vcc = ATM_SKB(skb)->vcc;
- if (vcc && vcc->pop != NULL) {
- vcc->pop(vcc, skb);
- } else {
- dev_kfree_skb_irq(skb);
- }
- scq->skb[i] = NULL;
- }
- if (++i == scq->num_entries)
- i = 0;
- }
- scq->tail = scq->base + pos;
- spin_unlock_irqrestore(&scq->lock, flags);
+ u32 scdi;
+ scq_info *scq;
+ ns_tsi *previous = NULL, *one_ahead, *two_ahead;
+	int serviced_entries;	/* flag indicating at least one entry was serviced */
+
+ serviced_entries = 0;
+
+ if (card->tsq.next == card->tsq.last)
+ one_ahead = card->tsq.base;
+ else
+ one_ahead = card->tsq.next + 1;
+
+ if (one_ahead == card->tsq.last)
+ two_ahead = card->tsq.base;
+ else
+ two_ahead = one_ahead + 1;
+
+ while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
+ !ns_tsi_isempty(two_ahead))
+ /* At most two empty, as stated in the 77201 errata */
+ {
+ serviced_entries = 1;
+
+ /* Skip the one or two possible empty entries */
+ while (ns_tsi_isempty(card->tsq.next)) {
+ if (card->tsq.next == card->tsq.last)
+ card->tsq.next = card->tsq.base;
+ else
+ card->tsq.next++;
+ }
+
+ if (!ns_tsi_tmrof(card->tsq.next)) {
+ scdi = ns_tsi_getscdindex(card->tsq.next);
+ if (scdi == NS_TSI_SCDISVBR)
+ scq = card->scq0;
+ else {
+ if (card->scd2vc[scdi] == NULL) {
+ printk
+ ("nicstar%d: could not find VC from SCD index.\n",
+ card->index);
+ ns_tsi_init(card->tsq.next);
+ return;
+ }
+ scq = card->scd2vc[scdi]->scq;
+ }
+ drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
+ scq->full = 0;
+ wake_up_interruptible(&(scq->scqfull_waitq));
+ }
+
+ ns_tsi_init(card->tsq.next);
+ previous = card->tsq.next;
+ if (card->tsq.next == card->tsq.last)
+ card->tsq.next = card->tsq.base;
+ else
+ card->tsq.next++;
+
+ if (card->tsq.next == card->tsq.last)
+ one_ahead = card->tsq.base;
+ else
+ one_ahead = card->tsq.next + 1;
+
+ if (one_ahead == card->tsq.last)
+ two_ahead = card->tsq.base;
+ else
+ two_ahead = one_ahead + 1;
+ }
+
+ if (serviced_entries)
+ writel(PTR_DIFF(previous, card->tsq.base),
+ card->membase + TSQH);
}
+static void drain_scq(ns_dev * card, scq_info * scq, int pos)
+{
+ struct atm_vcc *vcc;
+ struct sk_buff *skb;
+ int i;
+ unsigned long flags;
+
+ XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
+ card->index, scq, pos);
+ if (pos >= scq->num_entries) {
+ printk("nicstar%d: Bad index on drain_scq().\n", card->index);
+ return;
+ }
+
+ spin_lock_irqsave(&scq->lock, flags);
+ i = (int)(scq->tail - scq->base);
+ if (++i == scq->num_entries)
+ i = 0;
+ while (i != pos) {
+ skb = scq->skb[i];
+ XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
+ card->index, skb, i);
+ if (skb != NULL) {
+ pci_unmap_single(card->pcidev,
+ NS_PRV_DMA(skb),
+ skb->len,
+ PCI_DMA_TODEVICE);
+ vcc = ATM_SKB(skb)->vcc;
+ if (vcc && vcc->pop != NULL) {
+ vcc->pop(vcc, skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+ scq->skb[i] = NULL;
+ }
+ if (++i == scq->num_entries)
+ i = 0;
+ }
+ scq->tail = scq->base + pos;
+ spin_unlock_irqrestore(&scq->lock, flags);
+}
-
-static void process_rsq(ns_dev *card)
+static void process_rsq(ns_dev * card)
{
- ns_rsqe *previous;
-
- if (!ns_rsqe_valid(card->rsq.next))
- return;
- do {
- dequeue_rx(card, card->rsq.next);
- ns_rsqe_init(card->rsq.next);
- previous = card->rsq.next;
- if (card->rsq.next == card->rsq.last)
- card->rsq.next = card->rsq.base;
- else
- card->rsq.next++;
- } while (ns_rsqe_valid(card->rsq.next));
- writel((((u32) previous) - ((u32) card->rsq.base)),
- card->membase + RSQH);
+ ns_rsqe *previous;
+
+ if (!ns_rsqe_valid(card->rsq.next))
+ return;
+ do {
+ dequeue_rx(card, card->rsq.next);
+ ns_rsqe_init(card->rsq.next);
+ previous = card->rsq.next;
+ if (card->rsq.next == card->rsq.last)
+ card->rsq.next = card->rsq.base;
+ else
+ card->rsq.next++;
+ } while (ns_rsqe_valid(card->rsq.next));
+ writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
}
+static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
+{
+ u32 vpi, vci;
+ vc_map *vc;
+ struct sk_buff *iovb;
+ struct iovec *iov;
+ struct atm_vcc *vcc;
+ struct sk_buff *skb;
+ unsigned short aal5_len;
+ int len;
+ u32 stat;
+ u32 id;
+
+ stat = readl(card->membase + STAT);
+ card->sbfqc = ns_stat_sfbqc_get(stat);
+ card->lbfqc = ns_stat_lfbqc_get(stat);
+
+ id = le32_to_cpu(rsqe->buffer_handle);
+ skb = idr_find(&card->idr, id);
+ if (!skb) {
+ RXPRINTK(KERN_ERR
+ "nicstar%d: idr_find() failed!\n", card->index);
+ return;
+ }
+ idr_remove(&card->idr, id);
+ pci_dma_sync_single_for_cpu(card->pcidev,
+ NS_PRV_DMA(skb),
+ (NS_PRV_BUFTYPE(skb) == BUF_SM
+ ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_single(card->pcidev,
+ NS_PRV_DMA(skb),
+ (NS_PRV_BUFTYPE(skb) == BUF_SM
+ ? NS_SMSKBSIZE : NS_LGSKBSIZE),
+ PCI_DMA_FROMDEVICE);
+ vpi = ns_rsqe_vpi(rsqe);
+ vci = ns_rsqe_vci(rsqe);
+ if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
+ printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
+ card->index, vpi, vci);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+
+ vc = &(card->vcmap[vpi << card->vcibits | vci]);
+ if (!vc->rx) {
+ RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
+ card->index, vpi, vci);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+
+ vcc = vc->rx_vcc;
+
+ if (vcc->qos.aal == ATM_AAL0) {
+ struct sk_buff *sb;
+ unsigned char *cell;
+ int i;
+
+ cell = skb->data;
+ for (i = ns_rsqe_cellcount(rsqe); i; i--) {
+ if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
+ printk
+ ("nicstar%d: Can't allocate buffers for aal0.\n",
+ card->index);
+ atomic_add(i, &vcc->stats->rx_drop);
+ break;
+ }
+ if (!atm_charge(vcc, sb->truesize)) {
+ RXPRINTK
+ ("nicstar%d: atm_charge() dropped aal0 packets.\n",
+ card->index);
+ atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
+ dev_kfree_skb_any(sb);
+ break;
+ }
+ /* Rebuild the header */
+ *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
+ (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
+ if (i == 1 && ns_rsqe_eopdu(rsqe))
+ *((u32 *) sb->data) |= 0x00000002;
+ skb_put(sb, NS_AAL0_HEADER);
+ memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
+ skb_put(sb, ATM_CELL_PAYLOAD);
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+ atomic_inc(&vcc->stats->rx);
+ cell += ATM_CELL_PAYLOAD;
+ }
+
+ recycle_rx_buf(card, skb);
+ return;
+ }
+
+ /* To reach this point, the AAL layer can only be AAL5 */
+
+ if ((iovb = vc->rx_iov) == NULL) {
+ iovb = skb_dequeue(&(card->iovpool.queue));
+ if (iovb == NULL) { /* No buffers in the queue */
+ iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
+ if (iovb == NULL) {
+ printk("nicstar%d: Out of iovec buffers.\n",
+ card->index);
+ atomic_inc(&vcc->stats->rx_drop);
+ recycle_rx_buf(card, skb);
+ return;
+ }
+ NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+ } else if (--card->iovpool.count < card->iovnr.min) {
+ struct sk_buff *new_iovb;
+ if ((new_iovb =
+ alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
+ NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+ skb_queue_tail(&card->iovpool.queue, new_iovb);
+ card->iovpool.count++;
+ }
+ }
+ vc->rx_iov = iovb;
+ NS_PRV_IOVCNT(iovb) = 0;
+ iovb->len = 0;
+ iovb->data = iovb->head;
+ skb_reset_tail_pointer(iovb);
+ /* IMPORTANT: a pointer to the sk_buff containing the small or large
+ buffer is stored as iovec base, NOT a pointer to the
+ small or large buffer itself. */
+ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
+ printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
+ atomic_inc(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_MAX_IOVECS);
+ NS_PRV_IOVCNT(iovb) = 0;
+ iovb->len = 0;
+ iovb->data = iovb->head;
+ skb_reset_tail_pointer(iovb);
+ }
+ iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
+ iov->iov_base = (void *)skb;
+ iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
+ iovb->len += iov->iov_len;
+#ifdef EXTRA_DEBUG
+ if (NS_PRV_IOVCNT(iovb) == 1) {
+ if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
+ printk
+ ("nicstar%d: Expected a small buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+ atomic_inc(&vcc->stats->rx_err);
+ recycle_rx_buf(card, skb);
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+ return;
+ }
+ } else { /* NS_PRV_IOVCNT(iovb) >= 2 */
+
+ if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
+ printk
+ ("nicstar%d: Expected a large buffer, and this is not one.\n",
+ card->index);
+ which_list(card, skb);
+ atomic_inc(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+ return;
+ }
+ }
+#endif /* EXTRA_DEBUG */
-static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
-{
- u32 vpi, vci;
- vc_map *vc;
- struct sk_buff *iovb;
- struct iovec *iov;
- struct atm_vcc *vcc;
- struct sk_buff *skb;
- unsigned short aal5_len;
- int len;
- u32 stat;
-
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle);
- vpi = ns_rsqe_vpi(rsqe);
- vci = ns_rsqe_vci(rsqe);
- if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
- {
- printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
- card->index, vpi, vci);
- recycle_rx_buf(card, skb);
- return;
- }
-
- vc = &(card->vcmap[vpi << card->vcibits | vci]);
- if (!vc->rx)
- {
- RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
- card->index, vpi, vci);
- recycle_rx_buf(card, skb);
- return;
- }
-
- vcc = vc->rx_vcc;
-
- if (vcc->qos.aal == ATM_AAL0)
- {
- struct sk_buff *sb;
- unsigned char *cell;
- int i;
-
- cell = skb->data;
- for (i = ns_rsqe_cellcount(rsqe); i; i--)
- {
- if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL)
- {
- printk("nicstar%d: Can't allocate buffers for aal0.\n",
- card->index);
- atomic_add(i,&vcc->stats->rx_drop);
- break;
- }
- if (!atm_charge(vcc, sb->truesize))
- {
- RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
- card->index);
- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
- dev_kfree_skb_any(sb);
- break;
- }
- /* Rebuild the header */
- *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
- (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
- if (i == 1 && ns_rsqe_eopdu(rsqe))
- *((u32 *) sb->data) |= 0x00000002;
- skb_put(sb, NS_AAL0_HEADER);
- memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
- skb_put(sb, ATM_CELL_PAYLOAD);
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
- cell += ATM_CELL_PAYLOAD;
- }
-
- recycle_rx_buf(card, skb);
- return;
- }
-
- /* To reach this point, the AAL layer can only be AAL5 */
-
- if ((iovb = vc->rx_iov) == NULL)
- {
- iovb = skb_dequeue(&(card->iovpool.queue));
- if (iovb == NULL) /* No buffers in the queue */
- {
- iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
- if (iovb == NULL)
- {
- printk("nicstar%d: Out of iovec buffers.\n", card->index);
- atomic_inc(&vcc->stats->rx_drop);
- recycle_rx_buf(card, skb);
- return;
- }
- NS_SKB_CB(iovb)->buf_type = BUF_NONE;
- }
- else
- if (--card->iovpool.count < card->iovnr.min)
- {
- struct sk_buff *new_iovb;
- if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
- {
- NS_SKB_CB(iovb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->iovpool.queue, new_iovb);
- card->iovpool.count++;
- }
- }
- vc->rx_iov = iovb;
- NS_SKB(iovb)->iovcnt = 0;
- iovb->len = 0;
- iovb->data = iovb->head;
- skb_reset_tail_pointer(iovb);
- NS_SKB(iovb)->vcc = vcc;
- /* IMPORTANT: a pointer to the sk_buff containing the small or large
- buffer is stored as iovec base, NOT a pointer to the
- small or large buffer itself. */
- }
- else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
- {
- printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
- atomic_inc(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
- NS_SKB(iovb)->iovcnt = 0;
- iovb->len = 0;
- iovb->data = iovb->head;
- skb_reset_tail_pointer(iovb);
- NS_SKB(iovb)->vcc = vcc;
- }
- iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
- iov->iov_base = (void *) skb;
- iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
- iovb->len += iov->iov_len;
-
- if (NS_SKB(iovb)->iovcnt == 1)
- {
- if (NS_SKB_CB(skb)->buf_type != BUF_SM)
- {
- printk("nicstar%d: Expected a small buffer, and this is not one.\n",
- card->index);
- which_list(card, skb);
- atomic_inc(&vcc->stats->rx_err);
- recycle_rx_buf(card, skb);
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
- return;
- }
- }
- else /* NS_SKB(iovb)->iovcnt >= 2 */
- {
- if (NS_SKB_CB(skb)->buf_type != BUF_LG)
- {
- printk("nicstar%d: Expected a large buffer, and this is not one.\n",
- card->index);
- which_list(card, skb);
- atomic_inc(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
- NS_SKB(iovb)->iovcnt);
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
- return;
- }
- }
-
- if (ns_rsqe_eopdu(rsqe))
- {
- /* This works correctly regardless of the endianness of the host */
- unsigned char *L1L2 = (unsigned char *)((u32)skb->data +
- iov->iov_len - 6);
- aal5_len = L1L2[0] << 8 | L1L2[1];
- len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
- if (ns_rsqe_crcerr(rsqe) ||
- len + 8 > iovb->len || len + (47 + 8) < iovb->len)
- {
- printk("nicstar%d: AAL5 CRC error", card->index);
- if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
- printk(" - PDU size mismatch.\n");
- else
- printk(".\n");
- atomic_inc(&vcc->stats->rx_err);
- recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
- NS_SKB(iovb)->iovcnt);
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
- return;
- }
-
- /* By this point we (hopefully) have a complete SDU without errors. */
-
- if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */
- {
- /* skb points to a small buffer */
- if (!atm_charge(vcc, skb->truesize))
- {
- push_rxbufs(card, skb);
- atomic_inc(&vcc->stats->rx_drop);
- }
- else
- {
- skb_put(skb, len);
- dequeue_sm_buf(card, skb);
+ if (ns_rsqe_eopdu(rsqe)) {
+ /* This works correctly regardless of the endianness of the host */
+ unsigned char *L1L2 = (unsigned char *)
+ (skb->data + iov->iov_len - 6);
+ aal5_len = L1L2[0] << 8 | L1L2[1];
+ len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
+ if (ns_rsqe_crcerr(rsqe) ||
+ len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
+ printk("nicstar%d: AAL5 CRC error", card->index);
+ if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
+ printk(" - PDU size mismatch.\n");
+ else
+ printk(".\n");
+ atomic_inc(&vcc->stats->rx_err);
+ recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+ return;
+ }
+
+ /* By this point we (hopefully) have a complete SDU without errors. */
+
+ if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */
+ /* skb points to a small buffer */
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+ atomic_inc(&vcc->stats->rx_drop);
+ } else {
+ skb_put(skb, len);
+ dequeue_sm_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
- skb->destructor = ns_sb_destructor;
+ skb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
- ATM_SKB(skb)->vcc = vcc;
- __net_timestamp(skb);
- vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
- }
- }
- else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
- {
- struct sk_buff *sb;
-
- sb = (struct sk_buff *) (iov - 1)->iov_base;
- /* skb points to a large buffer */
-
- if (len <= NS_SMBUFSIZE)
- {
- if (!atm_charge(vcc, sb->truesize))
- {
- push_rxbufs(card, sb);
- atomic_inc(&vcc->stats->rx_drop);
- }
- else
- {
- skb_put(sb, len);
- dequeue_sm_buf(card, sb);
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+ atomic_inc(&vcc->stats->rx);
+ }
+ } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
+ struct sk_buff *sb;
+
+ sb = (struct sk_buff *)(iov - 1)->iov_base;
+ /* skb points to a large buffer */
+
+ if (len <= NS_SMBUFSIZE) {
+ if (!atm_charge(vcc, sb->truesize)) {
+ push_rxbufs(card, sb);
+ atomic_inc(&vcc->stats->rx_drop);
+ } else {
+ skb_put(sb, len);
+ dequeue_sm_buf(card, sb);
#ifdef NS_USE_DESTRUCTORS
- sb->destructor = ns_sb_destructor;
+ sb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
- ATM_SKB(sb)->vcc = vcc;
- __net_timestamp(sb);
- vcc->push(vcc, sb);
- atomic_inc(&vcc->stats->rx);
- }
-
- push_rxbufs(card, skb);
-
- }
- else /* len > NS_SMBUFSIZE, the usual case */
- {
- if (!atm_charge(vcc, skb->truesize))
- {
- push_rxbufs(card, skb);
- atomic_inc(&vcc->stats->rx_drop);
- }
- else
- {
- dequeue_lg_buf(card, skb);
+ ATM_SKB(sb)->vcc = vcc;
+ __net_timestamp(sb);
+ vcc->push(vcc, sb);
+ atomic_inc(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, skb);
+
+ } else { /* len > NS_SMBUFSIZE, the usual case */
+
+ if (!atm_charge(vcc, skb->truesize)) {
+ push_rxbufs(card, skb);
+ atomic_inc(&vcc->stats->rx_drop);
+ } else {
+ dequeue_lg_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
- skb->destructor = ns_lb_destructor;
+ skb->destructor = ns_lb_destructor;
#endif /* NS_USE_DESTRUCTORS */
- skb_push(skb, NS_SMBUFSIZE);
- skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
- skb_put(skb, len - NS_SMBUFSIZE);
- ATM_SKB(skb)->vcc = vcc;
- __net_timestamp(skb);
- vcc->push(vcc, skb);
- atomic_inc(&vcc->stats->rx);
- }
-
- push_rxbufs(card, sb);
-
- }
-
- }
- else /* Must push a huge buffer */
- {
- struct sk_buff *hb, *sb, *lb;
- int remaining, tocopy;
- int j;
-
- hb = skb_dequeue(&(card->hbpool.queue));
- if (hb == NULL) /* No buffers in the queue */
- {
-
- hb = dev_alloc_skb(NS_HBUFSIZE);
- if (hb == NULL)
- {
- printk("nicstar%d: Out of huge buffers.\n", card->index);
- atomic_inc(&vcc->stats->rx_drop);
- recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
- NS_SKB(iovb)->iovcnt);
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
- return;
- }
- else if (card->hbpool.count < card->hbnr.min)
- {
- struct sk_buff *new_hb;
- if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
- {
- skb_queue_tail(&card->hbpool.queue, new_hb);
- card->hbpool.count++;
- }
- }
- NS_SKB_CB(hb)->buf_type = BUF_NONE;
- }
- else
- if (--card->hbpool.count < card->hbnr.min)
- {
- struct sk_buff *new_hb;
- if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
- {
- NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->hbpool.queue, new_hb);
- card->hbpool.count++;
- }
- if (card->hbpool.count < card->hbnr.min)
- {
- if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
- {
- NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->hbpool.queue, new_hb);
- card->hbpool.count++;
- }
- }
- }
-
- iov = (struct iovec *) iovb->data;
-
- if (!atm_charge(vcc, hb->truesize))
- {
- recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt);
- if (card->hbpool.count < card->hbnr.max)
- {
- skb_queue_tail(&card->hbpool.queue, hb);
- card->hbpool.count++;
- }
- else
- dev_kfree_skb_any(hb);
- atomic_inc(&vcc->stats->rx_drop);
- }
- else
- {
- /* Copy the small buffer to the huge buffer */
- sb = (struct sk_buff *) iov->iov_base;
- skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
- skb_put(hb, iov->iov_len);
- remaining = len - iov->iov_len;
- iov++;
- /* Free the small buffer */
- push_rxbufs(card, sb);
-
- /* Copy all large buffers to the huge buffer and free them */
- for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
- {
- lb = (struct sk_buff *) iov->iov_base;
- tocopy = min_t(int, remaining, iov->iov_len);
- skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
- skb_put(hb, tocopy);
- iov++;
- remaining -= tocopy;
- push_rxbufs(card, lb);
- }
+ skb_push(skb, NS_SMBUFSIZE);
+ skb_copy_from_linear_data(sb, skb->data,
+ NS_SMBUFSIZE);
+ skb_put(skb, len - NS_SMBUFSIZE);
+ ATM_SKB(skb)->vcc = vcc;
+ __net_timestamp(skb);
+ vcc->push(vcc, skb);
+ atomic_inc(&vcc->stats->rx);
+ }
+
+ push_rxbufs(card, sb);
+
+ }
+
+ } else { /* Must push a huge buffer */
+
+ struct sk_buff *hb, *sb, *lb;
+ int remaining, tocopy;
+ int j;
+
+ hb = skb_dequeue(&(card->hbpool.queue));
+ if (hb == NULL) { /* No buffers in the queue */
+
+ hb = dev_alloc_skb(NS_HBUFSIZE);
+ if (hb == NULL) {
+ printk
+ ("nicstar%d: Out of huge buffers.\n",
+ card->index);
+ atomic_inc(&vcc->stats->rx_drop);
+ recycle_iovec_rx_bufs(card,
+ (struct iovec *)
+ iovb->data,
+ NS_PRV_IOVCNT(iovb));
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+ return;
+ } else if (card->hbpool.count < card->hbnr.min) {
+ struct sk_buff *new_hb;
+ if ((new_hb =
+ dev_alloc_skb(NS_HBUFSIZE)) !=
+ NULL) {
+ skb_queue_tail(&card->hbpool.
+ queue, new_hb);
+ card->hbpool.count++;
+ }
+ }
+ NS_PRV_BUFTYPE(hb) = BUF_NONE;
+ } else if (--card->hbpool.count < card->hbnr.min) {
+ struct sk_buff *new_hb;
+ if ((new_hb =
+ dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
+ NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
+ skb_queue_tail(&card->hbpool.queue,
+ new_hb);
+ card->hbpool.count++;
+ }
+ if (card->hbpool.count < card->hbnr.min) {
+ if ((new_hb =
+ dev_alloc_skb(NS_HBUFSIZE)) !=
+ NULL) {
+ NS_PRV_BUFTYPE(new_hb) =
+ BUF_NONE;
+ skb_queue_tail(&card->hbpool.
+ queue, new_hb);
+ card->hbpool.count++;
+ }
+ }
+ }
+
+ iov = (struct iovec *)iovb->data;
+
+ if (!atm_charge(vcc, hb->truesize)) {
+ recycle_iovec_rx_bufs(card, iov,
+ NS_PRV_IOVCNT(iovb));
+ if (card->hbpool.count < card->hbnr.max) {
+ skb_queue_tail(&card->hbpool.queue, hb);
+ card->hbpool.count++;
+ } else
+ dev_kfree_skb_any(hb);
+ atomic_inc(&vcc->stats->rx_drop);
+ } else {
+ /* Copy the small buffer to the huge buffer */
+ sb = (struct sk_buff *)iov->iov_base;
+ skb_copy_from_linear_data(sb, hb->data,
+ iov->iov_len);
+ skb_put(hb, iov->iov_len);
+ remaining = len - iov->iov_len;
+ iov++;
+ /* Free the small buffer */
+ push_rxbufs(card, sb);
+
+ /* Copy all large buffers to the huge buffer and free them */
+ for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
+ lb = (struct sk_buff *)iov->iov_base;
+ tocopy =
+ min_t(int, remaining, iov->iov_len);
+ skb_copy_from_linear_data(lb,
+ skb_tail_pointer
+ (hb), tocopy);
+ skb_put(hb, tocopy);
+ iov++;
+ remaining -= tocopy;
+ push_rxbufs(card, lb);
+ }
#ifdef EXTRA_DEBUG
- if (remaining != 0 || hb->len != len)
- printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
+ if (remaining != 0 || hb->len != len)
+ printk
+ ("nicstar%d: Huge buffer len mismatch.\n",
+ card->index);
#endif /* EXTRA_DEBUG */
- ATM_SKB(hb)->vcc = vcc;
+ ATM_SKB(hb)->vcc = vcc;
#ifdef NS_USE_DESTRUCTORS
- hb->destructor = ns_hb_destructor;
+ hb->destructor = ns_hb_destructor;
#endif /* NS_USE_DESTRUCTORS */
- __net_timestamp(hb);
- vcc->push(vcc, hb);
- atomic_inc(&vcc->stats->rx);
- }
- }
+ __net_timestamp(hb);
+ vcc->push(vcc, hb);
+ atomic_inc(&vcc->stats->rx);
+ }
+ }
- vc->rx_iov = NULL;
- recycle_iov_buf(card, iovb);
- }
+ vc->rx_iov = NULL;
+ recycle_iov_buf(card, iovb);
+ }
}
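
The reassembly path above gathers one small buffer plus a chain of large buffers into a single linear huge buffer, trimming the final chunk with min_t() so the copy never runs past the PDU length reported by the card. Stripped of the skb and iovec bookkeeping (in the driver the iovec entries actually carry sk_buff pointers and the copies go through skb_copy_from_linear_data()), the gather arithmetic reduces to the stand-alone sketch below; struct frag and gather_frags() are illustrative names, not driver code:

struct frag { const void *base; size_t len; };

/* Gather 'count' fragments into 'dst', copying at most 'len' bytes. */
static size_t gather_frags(const struct frag *f, int count,
			   unsigned char *dst, size_t len)
{
	size_t copied = 0;

	while (count-- > 0 && copied < len) {
		size_t chunk = min_t(size_t, len - copied, f->len);

		memcpy(dst + copied, f->base, chunk);
		copied += chunk;
		f++;
	}
	return copied;	/* should equal len, cf. the EXTRA_DEBUG check above */
}
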
-
-
#ifdef NS_USE_DESTRUCTORS
static void ns_sb_destructor(struct sk_buff *sb)
{
- ns_dev *card;
- u32 stat;
-
- card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- do
- {
- sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
- if (sb == NULL)
- break;
- NS_SKB_CB(sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, sb);
- skb_reserve(sb, NS_AAL0_HEADER);
- push_rxbufs(card, sb);
- } while (card->sbfqc < card->sbnr.min);
+ ns_dev *card;
+ u32 stat;
+
+ card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
+ stat = readl(card->membase + STAT);
+ card->sbfqc = ns_stat_sfbqc_get(stat);
+ card->lbfqc = ns_stat_lfbqc_get(stat);
+
+ do {
+ sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+ if (sb == NULL)
+ break;
+ NS_PRV_BUFTYPE(sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, sb);
+ skb_reserve(sb, NS_AAL0_HEADER);
+ push_rxbufs(card, sb);
+ } while (card->sbfqc < card->sbnr.min);
}
-
-
static void ns_lb_destructor(struct sk_buff *lb)
{
- ns_dev *card;
- u32 stat;
-
- card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
- stat = readl(card->membase + STAT);
- card->sbfqc = ns_stat_sfbqc_get(stat);
- card->lbfqc = ns_stat_lfbqc_get(stat);
-
- do
- {
- lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
- if (lb == NULL)
- break;
- NS_SKB_CB(lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, lb);
- skb_reserve(lb, NS_SMBUFSIZE);
- push_rxbufs(card, lb);
- } while (card->lbfqc < card->lbnr.min);
+ ns_dev *card;
+ u32 stat;
+
+ card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
+ stat = readl(card->membase + STAT);
+ card->sbfqc = ns_stat_sfbqc_get(stat);
+ card->lbfqc = ns_stat_lfbqc_get(stat);
+
+ do {
+ lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+ if (lb == NULL)
+ break;
+ NS_PRV_BUFTYPE(lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, lb);
+ skb_reserve(lb, NS_SMBUFSIZE);
+ push_rxbufs(card, lb);
+ } while (card->lbfqc < card->lbnr.min);
}
-
-
static void ns_hb_destructor(struct sk_buff *hb)
{
- ns_dev *card;
-
- card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
-
- while (card->hbpool.count < card->hbnr.init)
- {
- hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
- if (hb == NULL)
- break;
- NS_SKB_CB(hb)->buf_type = BUF_NONE;
- skb_queue_tail(&card->hbpool.queue, hb);
- card->hbpool.count++;
- }
+ ns_dev *card;
+
+ card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
+
+ while (card->hbpool.count < card->hbnr.init) {
+ hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+ if (hb == NULL)
+ break;
+ NS_PRV_BUFTYPE(hb) = BUF_NONE;
+ skb_queue_tail(&card->hbpool.queue, hb);
+ card->hbpool.count++;
+ }
}
#endif /* NS_USE_DESTRUCTORS */
-
-static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
+static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
- struct ns_skb_cb *cb = NS_SKB_CB(skb);
-
- if (unlikely(cb->buf_type == BUF_NONE)) {
- printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+ if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
+ printk("nicstar%d: What kind of rx buffer is this?\n",
+ card->index);
dev_kfree_skb_any(skb);
} else
push_rxbufs(card, skb);
}
-
-static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
+static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
{
while (count-- > 0)
- recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
+ recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
}
-
-static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
+static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
{
- if (card->iovpool.count < card->iovnr.max)
- {
- skb_queue_tail(&card->iovpool.queue, iovb);
- card->iovpool.count++;
- }
- else
- dev_kfree_skb_any(iovb);
+ if (card->iovpool.count < card->iovnr.max) {
+ skb_queue_tail(&card->iovpool.queue, iovb);
+ card->iovpool.count++;
+ } else
+ dev_kfree_skb_any(iovb);
}
-
-
-static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
+static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
- skb_unlink(sb, &card->sbpool.queue);
+ skb_unlink(sb, &card->sbpool.queue);
#ifdef NS_USE_DESTRUCTORS
- if (card->sbfqc < card->sbnr.min)
+ if (card->sbfqc < card->sbnr.min)
#else
- if (card->sbfqc < card->sbnr.init)
- {
- struct sk_buff *new_sb;
- if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
- {
- NS_SKB_CB(new_sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, new_sb);
- skb_reserve(new_sb, NS_AAL0_HEADER);
- push_rxbufs(card, new_sb);
- }
- }
- if (card->sbfqc < card->sbnr.init)
+ if (card->sbfqc < card->sbnr.init) {
+ struct sk_buff *new_sb;
+ if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
+ NS_PRV_BUFTYPE(new_sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, new_sb);
+ skb_reserve(new_sb, NS_AAL0_HEADER);
+ push_rxbufs(card, new_sb);
+ }
+ }
+ if (card->sbfqc < card->sbnr.init)
#endif /* NS_USE_DESTRUCTORS */
- {
- struct sk_buff *new_sb;
- if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
- {
- NS_SKB_CB(new_sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, new_sb);
- skb_reserve(new_sb, NS_AAL0_HEADER);
- push_rxbufs(card, new_sb);
- }
- }
+ {
+ struct sk_buff *new_sb;
+ if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
+ NS_PRV_BUFTYPE(new_sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, new_sb);
+ skb_reserve(new_sb, NS_AAL0_HEADER);
+ push_rxbufs(card, new_sb);
+ }
+ }
}
-
-
-static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
+static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
{
- skb_unlink(lb, &card->lbpool.queue);
+ skb_unlink(lb, &card->lbpool.queue);
#ifdef NS_USE_DESTRUCTORS
- if (card->lbfqc < card->lbnr.min)
+ if (card->lbfqc < card->lbnr.min)
#else
- if (card->lbfqc < card->lbnr.init)
- {
- struct sk_buff *new_lb;
- if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
- {
- NS_SKB_CB(new_lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, new_lb);
- skb_reserve(new_lb, NS_SMBUFSIZE);
- push_rxbufs(card, new_lb);
- }
- }
- if (card->lbfqc < card->lbnr.init)
+ if (card->lbfqc < card->lbnr.init) {
+ struct sk_buff *new_lb;
+ if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
+ NS_PRV_BUFTYPE(new_lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, new_lb);
+ skb_reserve(new_lb, NS_SMBUFSIZE);
+ push_rxbufs(card, new_lb);
+ }
+ }
+ if (card->lbfqc < card->lbnr.init)
#endif /* NS_USE_DESTRUCTORS */
- {
- struct sk_buff *new_lb;
- if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
- {
- NS_SKB_CB(new_lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, new_lb);
- skb_reserve(new_lb, NS_SMBUFSIZE);
- push_rxbufs(card, new_lb);
- }
- }
+ {
+ struct sk_buff *new_lb;
+ if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
+ NS_PRV_BUFTYPE(new_lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, new_lb);
+ skb_reserve(new_lb, NS_SMBUFSIZE);
+ push_rxbufs(card, new_lb);
+ }
+ }
}
-
-
-static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
+static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
{
- u32 stat;
- ns_dev *card;
- int left;
-
- left = (int) *pos;
- card = (ns_dev *) dev->dev_data;
- stat = readl(card->membase + STAT);
- if (!left--)
- return sprintf(page, "Pool count min init max \n");
- if (!left--)
- return sprintf(page, "Small %5d %5d %5d %5d \n",
- ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
- card->sbnr.max);
- if (!left--)
- return sprintf(page, "Large %5d %5d %5d %5d \n",
- ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
- card->lbnr.max);
- if (!left--)
- return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count,
- card->hbnr.min, card->hbnr.init, card->hbnr.max);
- if (!left--)
- return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count,
- card->iovnr.min, card->iovnr.init, card->iovnr.max);
- if (!left--)
- {
- int retval;
- retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
- card->intcnt = 0;
- return retval;
- }
+ u32 stat;
+ ns_dev *card;
+ int left;
+
+ left = (int)*pos;
+ card = (ns_dev *) dev->dev_data;
+ stat = readl(card->membase + STAT);
+ if (!left--)
+ return sprintf(page, "Pool count min init max \n");
+ if (!left--)
+ return sprintf(page, "Small %5d %5d %5d %5d \n",
+ ns_stat_sfbqc_get(stat), card->sbnr.min,
+ card->sbnr.init, card->sbnr.max);
+ if (!left--)
+ return sprintf(page, "Large %5d %5d %5d %5d \n",
+ ns_stat_lfbqc_get(stat), card->lbnr.min,
+ card->lbnr.init, card->lbnr.max);
+ if (!left--)
+ return sprintf(page, "Huge %5d %5d %5d %5d \n",
+ card->hbpool.count, card->hbnr.min,
+ card->hbnr.init, card->hbnr.max);
+ if (!left--)
+ return sprintf(page, "Iovec %5d %5d %5d %5d \n",
+ card->iovpool.count, card->iovnr.min,
+ card->iovnr.init, card->iovnr.max);
+ if (!left--) {
+ int retval;
+ retval =
+ sprintf(page, "Interrupt counter: %u \n", card->intcnt);
+ card->intcnt = 0;
+ return retval;
+ }
#if 0
- /* Dump 25.6 Mbps PHY registers */
- /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it
- here just in case it's needed for debugging. */
- if (card->max_pcr == ATM_25_PCR && !left--)
- {
- u32 phy_regs[4];
- u32 i;
-
- for (i = 0; i < 4; i++)
- {
- while (CMD_BUSY(card));
- writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
- while (CMD_BUSY(card));
- phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
- }
-
- return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
- phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
- }
+ /* Dump 25.6 Mbps PHY registers */
+ /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it
+ here just in case it's needed for debugging. */
+ if (card->max_pcr == ATM_25_PCR && !left--) {
+ u32 phy_regs[4];
+ u32 i;
+
+ for (i = 0; i < 4; i++) {
+ while (CMD_BUSY(card)) ;
+ writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
+ card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
+ }
+
+ return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
+ phy_regs[0], phy_regs[1], phy_regs[2],
+ phy_regs[3]);
+ }
#endif /* 0 - Dump 25.6 Mbps PHY registers */
#if 0
- /* Dump TST */
- if (left-- < NS_TST_NUM_ENTRIES)
- {
- if (card->tste2vc[left + 1] == NULL)
- return sprintf(page, "%5d - VBR/UBR \n", left + 1);
- else
- return sprintf(page, "%5d - %d %d \n", left + 1,
- card->tste2vc[left + 1]->tx_vcc->vpi,
- card->tste2vc[left + 1]->tx_vcc->vci);
- }
+ /* Dump TST */
+ if (left-- < NS_TST_NUM_ENTRIES) {
+ if (card->tste2vc[left + 1] == NULL)
+ return sprintf(page, "%5d - VBR/UBR \n", left + 1);
+ else
+ return sprintf(page, "%5d - %d %d \n", left + 1,
+ card->tste2vc[left + 1]->tx_vcc->vpi,
+ card->tste2vc[left + 1]->tx_vcc->vci);
+ }
#endif /* 0 */
- return 0;
+ return 0;
}
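
ns_proc_read() above emits exactly one line per call, indexed by *pos (the local "left" countdown), so reading the per-card proc entry yields a small pool table followed by the interrupt counter, which is cleared as a side effect of being read. With the default levels from nicstar.h (the NUM_*/MIN_*/MAX_* values) and hypothetical run-time counts, the output looks roughly like:

	Pool count min init max
	Small    55     8    32    64
	Large    30     8    24    48
	Huge      8     6     8    10
	Iovec    48     8    48    80
	Interrupt counter: 1274
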
-
-
-static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
{
- ns_dev *card;
- pool_levels pl;
- long btype;
- unsigned long flags;
-
- card = dev->dev_data;
- switch (cmd)
- {
- case NS_GETPSTAT:
- if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype))
- return -EFAULT;
- switch (pl.buftype)
- {
- case NS_BUFTYPE_SMALL:
- pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
- pl.level.min = card->sbnr.min;
- pl.level.init = card->sbnr.init;
- pl.level.max = card->sbnr.max;
- break;
-
- case NS_BUFTYPE_LARGE:
- pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
- pl.level.min = card->lbnr.min;
- pl.level.init = card->lbnr.init;
- pl.level.max = card->lbnr.max;
- break;
-
- case NS_BUFTYPE_HUGE:
- pl.count = card->hbpool.count;
- pl.level.min = card->hbnr.min;
- pl.level.init = card->hbnr.init;
- pl.level.max = card->hbnr.max;
- break;
-
- case NS_BUFTYPE_IOVEC:
- pl.count = card->iovpool.count;
- pl.level.min = card->iovnr.min;
- pl.level.init = card->iovnr.init;
- pl.level.max = card->iovnr.max;
- break;
-
- default:
- return -ENOIOCTLCMD;
-
- }
- if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
- return (sizeof(pl));
- else
- return -EFAULT;
-
- case NS_SETBUFLEV:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
- return -EFAULT;
- if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
- return -EINVAL;
- if (pl.level.min == 0)
- return -EINVAL;
- switch (pl.buftype)
- {
- case NS_BUFTYPE_SMALL:
- if (pl.level.max > TOP_SB)
- return -EINVAL;
- card->sbnr.min = pl.level.min;
- card->sbnr.init = pl.level.init;
- card->sbnr.max = pl.level.max;
- break;
-
- case NS_BUFTYPE_LARGE:
- if (pl.level.max > TOP_LB)
- return -EINVAL;
- card->lbnr.min = pl.level.min;
- card->lbnr.init = pl.level.init;
- card->lbnr.max = pl.level.max;
- break;
-
- case NS_BUFTYPE_HUGE:
- if (pl.level.max > TOP_HB)
- return -EINVAL;
- card->hbnr.min = pl.level.min;
- card->hbnr.init = pl.level.init;
- card->hbnr.max = pl.level.max;
- break;
-
- case NS_BUFTYPE_IOVEC:
- if (pl.level.max > TOP_IOVB)
- return -EINVAL;
- card->iovnr.min = pl.level.min;
- card->iovnr.init = pl.level.init;
- card->iovnr.max = pl.level.max;
- break;
-
- default:
- return -EINVAL;
-
- }
- return 0;
-
- case NS_ADJBUFLEV:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- btype = (long) arg; /* a long is the same size as a pointer or bigger */
- switch (btype)
- {
- case NS_BUFTYPE_SMALL:
- while (card->sbfqc < card->sbnr.init)
- {
- struct sk_buff *sb;
-
- sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
- if (sb == NULL)
- return -ENOMEM;
- NS_SKB_CB(sb)->buf_type = BUF_SM;
- skb_queue_tail(&card->sbpool.queue, sb);
- skb_reserve(sb, NS_AAL0_HEADER);
- push_rxbufs(card, sb);
- }
- break;
-
- case NS_BUFTYPE_LARGE:
- while (card->lbfqc < card->lbnr.init)
- {
- struct sk_buff *lb;
-
- lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
- if (lb == NULL)
- return -ENOMEM;
- NS_SKB_CB(lb)->buf_type = BUF_LG;
- skb_queue_tail(&card->lbpool.queue, lb);
- skb_reserve(lb, NS_SMBUFSIZE);
- push_rxbufs(card, lb);
- }
- break;
-
- case NS_BUFTYPE_HUGE:
- while (card->hbpool.count > card->hbnr.init)
- {
- struct sk_buff *hb;
-
- spin_lock_irqsave(&card->int_lock, flags);
- hb = skb_dequeue(&card->hbpool.queue);
- card->hbpool.count--;
- spin_unlock_irqrestore(&card->int_lock, flags);
- if (hb == NULL)
- printk("nicstar%d: huge buffer count inconsistent.\n",
- card->index);
- else
- dev_kfree_skb_any(hb);
-
- }
- while (card->hbpool.count < card->hbnr.init)
- {
- struct sk_buff *hb;
-
- hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
- if (hb == NULL)
- return -ENOMEM;
- NS_SKB_CB(hb)->buf_type = BUF_NONE;
- spin_lock_irqsave(&card->int_lock, flags);
- skb_queue_tail(&card->hbpool.queue, hb);
- card->hbpool.count++;
- spin_unlock_irqrestore(&card->int_lock, flags);
- }
- break;
-
- case NS_BUFTYPE_IOVEC:
- while (card->iovpool.count > card->iovnr.init)
- {
- struct sk_buff *iovb;
-
- spin_lock_irqsave(&card->int_lock, flags);
- iovb = skb_dequeue(&card->iovpool.queue);
- card->iovpool.count--;
- spin_unlock_irqrestore(&card->int_lock, flags);
- if (iovb == NULL)
- printk("nicstar%d: iovec buffer count inconsistent.\n",
- card->index);
- else
- dev_kfree_skb_any(iovb);
-
- }
- while (card->iovpool.count < card->iovnr.init)
- {
- struct sk_buff *iovb;
-
- iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
- if (iovb == NULL)
- return -ENOMEM;
- NS_SKB_CB(iovb)->buf_type = BUF_NONE;
- spin_lock_irqsave(&card->int_lock, flags);
- skb_queue_tail(&card->iovpool.queue, iovb);
- card->iovpool.count++;
- spin_unlock_irqrestore(&card->int_lock, flags);
- }
- break;
-
- default:
- return -EINVAL;
-
- }
- return 0;
-
- default:
- if (dev->phy && dev->phy->ioctl) {
- return dev->phy->ioctl(dev, cmd, arg);
- }
- else {
- printk("nicstar%d: %s == NULL \n", card->index,
- dev->phy ? "dev->phy->ioctl" : "dev->phy");
- return -ENOIOCTLCMD;
- }
- }
+ ns_dev *card;
+ pool_levels pl;
+ long btype;
+ unsigned long flags;
+
+ card = dev->dev_data;
+ switch (cmd) {
+ case NS_GETPSTAT:
+ if (get_user
+ (pl.buftype, &((pool_levels __user *) arg)->buftype))
+ return -EFAULT;
+ switch (pl.buftype) {
+ case NS_BUFTYPE_SMALL:
+ pl.count =
+ ns_stat_sfbqc_get(readl(card->membase + STAT));
+ pl.level.min = card->sbnr.min;
+ pl.level.init = card->sbnr.init;
+ pl.level.max = card->sbnr.max;
+ break;
+
+ case NS_BUFTYPE_LARGE:
+ pl.count =
+ ns_stat_lfbqc_get(readl(card->membase + STAT));
+ pl.level.min = card->lbnr.min;
+ pl.level.init = card->lbnr.init;
+ pl.level.max = card->lbnr.max;
+ break;
+
+ case NS_BUFTYPE_HUGE:
+ pl.count = card->hbpool.count;
+ pl.level.min = card->hbnr.min;
+ pl.level.init = card->hbnr.init;
+ pl.level.max = card->hbnr.max;
+ break;
+
+ case NS_BUFTYPE_IOVEC:
+ pl.count = card->iovpool.count;
+ pl.level.min = card->iovnr.min;
+ pl.level.init = card->iovnr.init;
+ pl.level.max = card->iovnr.max;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+
+ }
+ if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
+ return (sizeof(pl));
+ else
+ return -EFAULT;
+
+ case NS_SETBUFLEV:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
+ return -EFAULT;
+ if (pl.level.min >= pl.level.init
+ || pl.level.init >= pl.level.max)
+ return -EINVAL;
+ if (pl.level.min == 0)
+ return -EINVAL;
+ switch (pl.buftype) {
+ case NS_BUFTYPE_SMALL:
+ if (pl.level.max > TOP_SB)
+ return -EINVAL;
+ card->sbnr.min = pl.level.min;
+ card->sbnr.init = pl.level.init;
+ card->sbnr.max = pl.level.max;
+ break;
+
+ case NS_BUFTYPE_LARGE:
+ if (pl.level.max > TOP_LB)
+ return -EINVAL;
+ card->lbnr.min = pl.level.min;
+ card->lbnr.init = pl.level.init;
+ card->lbnr.max = pl.level.max;
+ break;
+
+ case NS_BUFTYPE_HUGE:
+ if (pl.level.max > TOP_HB)
+ return -EINVAL;
+ card->hbnr.min = pl.level.min;
+ card->hbnr.init = pl.level.init;
+ card->hbnr.max = pl.level.max;
+ break;
+
+ case NS_BUFTYPE_IOVEC:
+ if (pl.level.max > TOP_IOVB)
+ return -EINVAL;
+ card->iovnr.min = pl.level.min;
+ card->iovnr.init = pl.level.init;
+ card->iovnr.max = pl.level.max;
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+ return 0;
+
+ case NS_ADJBUFLEV:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ btype = (long)arg; /* a long is the same size as a pointer or bigger */
+ switch (btype) {
+ case NS_BUFTYPE_SMALL:
+ while (card->sbfqc < card->sbnr.init) {
+ struct sk_buff *sb;
+
+ sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
+ if (sb == NULL)
+ return -ENOMEM;
+ NS_PRV_BUFTYPE(sb) = BUF_SM;
+ skb_queue_tail(&card->sbpool.queue, sb);
+ skb_reserve(sb, NS_AAL0_HEADER);
+ push_rxbufs(card, sb);
+ }
+ break;
+
+ case NS_BUFTYPE_LARGE:
+ while (card->lbfqc < card->lbnr.init) {
+ struct sk_buff *lb;
+
+ lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
+ if (lb == NULL)
+ return -ENOMEM;
+ NS_PRV_BUFTYPE(lb) = BUF_LG;
+ skb_queue_tail(&card->lbpool.queue, lb);
+ skb_reserve(lb, NS_SMBUFSIZE);
+ push_rxbufs(card, lb);
+ }
+ break;
+
+ case NS_BUFTYPE_HUGE:
+ while (card->hbpool.count > card->hbnr.init) {
+ struct sk_buff *hb;
+
+ spin_lock_irqsave(&card->int_lock, flags);
+ hb = skb_dequeue(&card->hbpool.queue);
+ card->hbpool.count--;
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ if (hb == NULL)
+ printk
+ ("nicstar%d: huge buffer count inconsistent.\n",
+ card->index);
+ else
+ dev_kfree_skb_any(hb);
+
+ }
+ while (card->hbpool.count < card->hbnr.init) {
+ struct sk_buff *hb;
+
+ hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
+ if (hb == NULL)
+ return -ENOMEM;
+ NS_PRV_BUFTYPE(hb) = BUF_NONE;
+ spin_lock_irqsave(&card->int_lock, flags);
+ skb_queue_tail(&card->hbpool.queue, hb);
+ card->hbpool.count++;
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ }
+ break;
+
+ case NS_BUFTYPE_IOVEC:
+ while (card->iovpool.count > card->iovnr.init) {
+ struct sk_buff *iovb;
+
+ spin_lock_irqsave(&card->int_lock, flags);
+ iovb = skb_dequeue(&card->iovpool.queue);
+ card->iovpool.count--;
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ if (iovb == NULL)
+ printk
+ ("nicstar%d: iovec buffer count inconsistent.\n",
+ card->index);
+ else
+ dev_kfree_skb_any(iovb);
+
+ }
+ while (card->iovpool.count < card->iovnr.init) {
+ struct sk_buff *iovb;
+
+ iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
+ if (iovb == NULL)
+ return -ENOMEM;
+ NS_PRV_BUFTYPE(iovb) = BUF_NONE;
+ spin_lock_irqsave(&card->int_lock, flags);
+ skb_queue_tail(&card->iovpool.queue, iovb);
+ card->iovpool.count++;
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ }
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+ return 0;
+
+ default:
+ if (dev->phy && dev->phy->ioctl) {
+ return dev->phy->ioctl(dev, cmd, arg);
+ } else {
+ printk("nicstar%d: %s == NULL \n", card->index,
+ dev->phy ? "dev->phy->ioctl" : "dev->phy");
+ return -ENOIOCTLCMD;
+ }
+ }
}
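
NS_SETBUFLEV only accepts pool levels satisfying 0 < min < init < max, with max further capped by the per-pool TOP_* ceiling checked in the inner switch. The same test, pulled out into a helper for clarity (the helper name is hypothetical; the driver performs these checks inline above):

/* Hypothetical helper mirroring the NS_SETBUFLEV sanity checks above. */
static int ns_levels_ok(const pool_levels *pl, int top)
{
	if (pl->level.min == 0)
		return 0;
	if (pl->level.min >= pl->level.init || pl->level.init >= pl->level.max)
		return 0;
	return pl->level.max <= top;	/* e.g. ns_levels_ok(&pl, TOP_SB) */
}
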
-
-static void which_list(ns_dev *card, struct sk_buff *skb)
+#ifdef EXTRA_DEBUG
+static void which_list(ns_dev * card, struct sk_buff *skb)
{
- printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
+ printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
}
-
+#endif /* EXTRA_DEBUG */
static void ns_poll(unsigned long arg)
{
- int i;
- ns_dev *card;
- unsigned long flags;
- u32 stat_r, stat_w;
-
- PRINTK("nicstar: Entering ns_poll().\n");
- for (i = 0; i < num_cards; i++)
- {
- card = cards[i];
- if (spin_is_locked(&card->int_lock)) {
- /* Probably it isn't worth spinning */
- continue;
- }
- spin_lock_irqsave(&card->int_lock, flags);
-
- stat_w = 0;
- stat_r = readl(card->membase + STAT);
- if (stat_r & NS_STAT_TSIF)
- stat_w |= NS_STAT_TSIF;
- if (stat_r & NS_STAT_EOPDU)
- stat_w |= NS_STAT_EOPDU;
-
- process_tsq(card);
- process_rsq(card);
-
- writel(stat_w, card->membase + STAT);
- spin_unlock_irqrestore(&card->int_lock, flags);
- }
- mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
- PRINTK("nicstar: Leaving ns_poll().\n");
+ int i;
+ ns_dev *card;
+ unsigned long flags;
+ u32 stat_r, stat_w;
+
+ PRINTK("nicstar: Entering ns_poll().\n");
+ for (i = 0; i < num_cards; i++) {
+ card = cards[i];
+ if (spin_is_locked(&card->int_lock)) {
+ /* Probably it isn't worth spinning */
+ continue;
+ }
+ spin_lock_irqsave(&card->int_lock, flags);
+
+ stat_w = 0;
+ stat_r = readl(card->membase + STAT);
+ if (stat_r & NS_STAT_TSIF)
+ stat_w |= NS_STAT_TSIF;
+ if (stat_r & NS_STAT_EOPDU)
+ stat_w |= NS_STAT_EOPDU;
+
+ process_tsq(card);
+ process_rsq(card);
+
+ writel(stat_w, card->membase + STAT);
+ spin_unlock_irqrestore(&card->int_lock, flags);
+ }
+ mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
+ PRINTK("nicstar: Leaving ns_poll().\n");
}
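
ns_poll() is the fallback for the interrupt handler: it walks every registered card, skips any whose int_lock is already taken, drains the transmit and receive status queues, and re-arms itself with mod_timer() every NS_POLL_PERIOD jiffies. The timer itself is set up in the driver's init path (outside this hunk); with the timer API of this kernel generation the arming would look roughly like the sketch below, where ns_arm_poll_timer() and the local ns_timer declaration are stand-ins for the driver's own code:

/* Sketch only: arming a self-rearming poll timer with the classic API. */
static struct timer_list ns_timer;	/* stand-in for the driver's ns_timer */

static void ns_arm_poll_timer(void)
{
	init_timer(&ns_timer);
	ns_timer.function = ns_poll;	/* void (*)(unsigned long) */
	ns_timer.data = 0UL;
	ns_timer.expires = jiffies + NS_POLL_PERIOD;
	add_timer(&ns_timer);
}
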
-
-
static int ns_parse_mac(char *mac, unsigned char *esi)
{
- int i, j;
- short byte1, byte0;
-
- if (mac == NULL || esi == NULL)
- return -1;
- j = 0;
- for (i = 0; i < 6; i++)
- {
- if ((byte1 = ns_h2i(mac[j++])) < 0)
- return -1;
- if ((byte0 = ns_h2i(mac[j++])) < 0)
- return -1;
- esi[i] = (unsigned char) (byte1 * 16 + byte0);
- if (i < 5)
- {
- if (mac[j++] != ':')
- return -1;
- }
- }
- return 0;
+ int i, j;
+ short byte1, byte0;
+
+ if (mac == NULL || esi == NULL)
+ return -1;
+ j = 0;
+ for (i = 0; i < 6; i++) {
+ if ((byte1 = ns_h2i(mac[j++])) < 0)
+ return -1;
+ if ((byte0 = ns_h2i(mac[j++])) < 0)
+ return -1;
+ esi[i] = (unsigned char)(byte1 * 16 + byte0);
+ if (i < 5) {
+ if (mac[j++] != ':')
+ return -1;
+ }
+ }
+ return 0;
}
-
-
static short ns_h2i(char c)
{
- if (c >= '0' && c <= '9')
- return (short) (c - '0');
- if (c >= 'A' && c <= 'F')
- return (short) (c - 'A' + 10);
- if (c >= 'a' && c <= 'f')
- return (short) (c - 'a' + 10);
- return -1;
+ if (c >= '0' && c <= '9')
+ return (short)(c - '0');
+ if (c >= 'A' && c <= 'F')
+ return (short)(c - 'A' + 10);
+ if (c >= 'a' && c <= 'f')
+ return (short)(c - 'a' + 10);
+ return -1;
}
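
ns_parse_mac() accepts the usual colon-separated notation only: exactly two hex digits per octet (either case, courtesy of ns_h2i()) and a ':' after each of the first five octets; anything else makes it return -1, possibly leaving the caller's buffer partially written. For example (values are hypothetical):

unsigned char esi[6];

/* Returns 0 and fills esi with { 0x00, 0x20, 0x48, 0x1a, 0x2b, 0x3c } */
ns_parse_mac("00:20:48:1a:2b:3c", esi);

/* Both rejected (-1): separators and two digits per octet are mandatory. */
ns_parse_mac("0020481a2b3c", esi);
ns_parse_mac("0:20:48:1a:2b:3c", esi);
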
-
-
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
- unsigned long addr)
+ unsigned long addr)
{
- ns_dev *card;
- unsigned long flags;
-
- card = dev->dev_data;
- spin_lock_irqsave(&card->res_lock, flags);
- while(CMD_BUSY(card));
- writel((unsigned long) value, card->membase + DR0);
- writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
- card->membase + CMD);
- spin_unlock_irqrestore(&card->res_lock, flags);
+ ns_dev *card;
+ unsigned long flags;
+
+ card = dev->dev_data;
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ writel((u32) value, card->membase + DR0);
+ writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
+ card->membase + CMD);
+ spin_unlock_irqrestore(&card->res_lock, flags);
}
-
-
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
{
- ns_dev *card;
- unsigned long flags;
- unsigned long data;
-
- card = dev->dev_data;
- spin_lock_irqsave(&card->res_lock, flags);
- while(CMD_BUSY(card));
- writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
- card->membase + CMD);
- while(CMD_BUSY(card));
- data = readl(card->membase + DR0) & 0x000000FF;
- spin_unlock_irqrestore(&card->res_lock, flags);
- return (unsigned char) data;
+ ns_dev *card;
+ unsigned long flags;
+ u32 data;
+
+ card = dev->dev_data;
+ spin_lock_irqsave(&card->res_lock, flags);
+ while (CMD_BUSY(card)) ;
+ writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
+ card->membase + CMD);
+ while (CMD_BUSY(card)) ;
+ data = readl(card->membase + DR0) & 0x000000FF;
+ spin_unlock_irqrestore(&card->res_lock, flags);
+ return (unsigned char)data;
}
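
ns_phy_put() and ns_phy_get() reach the PHY over the card's utility bus: both spin until the command register is idle, ns_phy_put() stages the byte in DR0 before issuing NS_CMD_WRITE_UTILITY, and ns_phy_get() issues NS_CMD_READ_UTILITY and then picks the byte out of DR0, all under res_lock. A typical read-modify-write built on top of them would be (function name and arguments are placeholders):

/* Illustrative read-modify-write of a PHY register via the helpers above. */
static void ns_phy_set_bits(struct atm_dev *dev, unsigned long reg,
			    unsigned char bits)
{
	unsigned char val = ns_phy_get(dev, reg);

	ns_phy_put(dev, val | bits, reg);
}
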
-
-
module_init(nicstar_init);
module_exit(nicstar_cleanup);
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index 6010e3daa6a2..9bc27ea5088e 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -1,5 +1,4 @@
-/******************************************************************************
- *
+/*
* nicstar.h
*
* Header file for the nicstar device driver.
@@ -8,29 +7,26 @@
* PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
*
* (C) INESC 1998
- *
- ******************************************************************************/
-
+ */
#ifndef _LINUX_NICSTAR_H_
#define _LINUX_NICSTAR_H_
-
-/* Includes *******************************************************************/
+/* Includes */
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atm_nicstar.h>
-
-/* Options ********************************************************************/
+/* Options */
#define NS_MAX_CARDS 4 /* Maximum number of NICStAR based cards
controlled by the device driver. Must
- be <= 5 */
+ be <= 5 */
#undef RCQ_SUPPORT /* Do not define this for now */
@@ -43,7 +39,7 @@
#define NS_VPIBITS 2 /* 0, 1, 2, or 8 */
#define NS_MAX_RCTSIZE 4096 /* Number of entries. 4096 or 16384.
- Define 4096 only if (all) your card(s)
+ Define 4096 only if (all) your card(s)
have 32K x 32bit SRAM, in which case
setting this to 16384 will just waste a
lot of memory.
@@ -51,33 +47,32 @@
128K x 32bit SRAM will limit the maximum
VCI. */
-/*#define NS_PCI_LATENCY 64*/ /* Must be a multiple of 32 */
+ /*#define NS_PCI_LATENCY 64*//* Must be a multiple of 32 */
/* Number of buffers initially allocated */
-#define NUM_SB 32 /* Must be even */
-#define NUM_LB 24 /* Must be even */
-#define NUM_HB 8 /* Pre-allocated huge buffers */
-#define NUM_IOVB 48 /* Iovec buffers */
+#define NUM_SB 32 /* Must be even */
+#define NUM_LB 24 /* Must be even */
+#define NUM_HB 8 /* Pre-allocated huge buffers */
+#define NUM_IOVB 48 /* Iovec buffers */
/* Lower level for count of buffers */
-#define MIN_SB 8 /* Must be even */
-#define MIN_LB 8 /* Must be even */
+#define MIN_SB 8 /* Must be even */
+#define MIN_LB 8 /* Must be even */
#define MIN_HB 6
#define MIN_IOVB 8
/* Upper level for count of buffers */
-#define MAX_SB 64 /* Must be even, <= 508 */
-#define MAX_LB 48 /* Must be even, <= 508 */
+#define MAX_SB 64 /* Must be even, <= 508 */
+#define MAX_LB 48 /* Must be even, <= 508 */
#define MAX_HB 10
#define MAX_IOVB 80
/* These are the absolute maximum allowed for the ioctl() */
-#define TOP_SB 256 /* Must be even, <= 508 */
-#define TOP_LB 128 /* Must be even, <= 508 */
+#define TOP_SB 256 /* Must be even, <= 508 */
+#define TOP_LB 128 /* Must be even, <= 508 */
#define TOP_HB 64
#define TOP_IOVB 256
-
#define MAX_TBD_PER_VC 1 /* Number of TBDs before a TSR */
#define MAX_TBD_PER_SCQ 10 /* Only meaningful for variable rate SCQs */
@@ -89,15 +84,12 @@
#define PCR_TOLERANCE (1.0001)
-
-
-/* ESI stuff ******************************************************************/
+/* ESI stuff */
#define NICSTAR_EPROM_MAC_ADDR_OFFSET 0x6C
#define NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT 0xF6
-
-/* #defines *******************************************************************/
+/* #defines */
#define NS_IOREMAP_SIZE 4096
@@ -123,22 +115,19 @@
#define NS_SMSKBSIZE (NS_SMBUFSIZE + NS_AAL0_HEADER)
#define NS_LGSKBSIZE (NS_SMBUFSIZE + NS_LGBUFSIZE)
+/* NICStAR structures located in host memory */
-/* NICStAR structures located in host memory **********************************/
-
-
-
-/* RSQ - Receive Status Queue
+/*
+ * RSQ - Receive Status Queue
*
* Written by the NICStAR, read by the device driver.
*/
-typedef struct ns_rsqe
-{
- u32 word_1;
- u32 buffer_handle;
- u32 final_aal5_crc32;
- u32 word_4;
+typedef struct ns_rsqe {
+ u32 word_1;
+ u32 buffer_handle;
+ u32 final_aal5_crc32;
+ u32 word_4;
} ns_rsqe;
#define ns_rsqe_vpi(ns_rsqep) \
@@ -175,30 +164,27 @@ typedef struct ns_rsqe
#define ns_rsqe_cellcount(ns_rsqep) \
(le32_to_cpu((ns_rsqep)->word_4) & 0x000001FF)
#define ns_rsqe_init(ns_rsqep) \
- ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000))
+ ((ns_rsqep)->word_4 = cpu_to_le32(0x00000000))
#define NS_RSQ_NUM_ENTRIES (NS_RSQSIZE / 16)
#define NS_RSQ_ALIGNMENT NS_RSQSIZE
-
-
-/* RCQ - Raw Cell Queue
+/*
+ * RCQ - Raw Cell Queue
*
* Written by the NICStAR, read by the device driver.
*/
-typedef struct cell_payload
-{
- u32 word[12];
+typedef struct cell_payload {
+ u32 word[12];
} cell_payload;
-typedef struct ns_rcqe
-{
- u32 word_1;
- u32 word_2;
- u32 word_3;
- u32 word_4;
- cell_payload payload;
+typedef struct ns_rcqe {
+ u32 word_1;
+ u32 word_2;
+ u32 word_3;
+ u32 word_4;
+ cell_payload payload;
} ns_rcqe;
#define NS_RCQE_SIZE 64 /* bytes */
@@ -210,28 +196,25 @@ typedef struct ns_rcqe
#define ns_rcqe_nextbufhandle(ns_rcqep) \
(le32_to_cpu((ns_rcqep)->word_2))
-
-
-/* SCQ - Segmentation Channel Queue
+/*
+ * SCQ - Segmentation Channel Queue
*
* Written by the device driver, read by the NICStAR.
*/
-typedef struct ns_scqe
-{
- u32 word_1;
- u32 word_2;
- u32 word_3;
- u32 word_4;
+typedef struct ns_scqe {
+ u32 word_1;
+ u32 word_2;
+ u32 word_3;
+ u32 word_4;
} ns_scqe;
/* NOTE: SCQ entries can be either a TBD (Transmit Buffer Descriptors)
- or TSR (Transmit Status Requests) */
+ or TSR (Transmit Status Requests) */
#define NS_SCQE_TYPE_TBD 0x00000000
#define NS_SCQE_TYPE_TSR 0x80000000
-
#define NS_TBD_EOPDU 0x40000000
#define NS_TBD_AAL0 0x00000000
#define NS_TBD_AAL34 0x04000000
@@ -253,10 +236,9 @@ typedef struct ns_scqe
#define ns_tbd_mkword_4(gfc, vpi, vci, pt, clp) \
(cpu_to_le32((gfc) << 28 | (vpi) << 20 | (vci) << 4 | (pt) << 1 | (clp)))
-
#define NS_TSR_INTENABLE 0x20000000
-#define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */
+#define NS_TSR_SCDISVBR 0xFFFF /* Use as scdi for VBR SCD */
#define ns_tsr_mkword_1(flags) \
(cpu_to_le32(NS_SCQE_TYPE_TSR | (flags)))
@@ -273,22 +255,20 @@ typedef struct ns_scqe
#define NS_SCQE_SIZE 16
-
-
-/* TSQ - Transmit Status Queue
+/*
+ * TSQ - Transmit Status Queue
*
* Written by the NICStAR, read by the device driver.
*/
-typedef struct ns_tsi
-{
- u32 word_1;
- u32 word_2;
+typedef struct ns_tsi {
+ u32 word_1;
+ u32 word_2;
} ns_tsi;
/* NOTE: The first word can be a status word copied from the TSR which
- originated the TSI, or a timer overflow indicator. In this last
- case, the value of the first word is all zeroes. */
+ originated the TSI, or a timer overflow indicator. In this last
+ case, the value of the first word is all zeroes. */
#define NS_TSI_EMPTY 0x80000000
#define NS_TSI_TIMESTAMP_MASK 0x00FFFFFF
@@ -301,12 +281,10 @@ typedef struct ns_tsi
#define ns_tsi_init(ns_tsip) \
((ns_tsip)->word_2 = cpu_to_le32(NS_TSI_EMPTY))
-
#define NS_TSQSIZE 8192
#define NS_TSQ_NUM_ENTRIES 1024
#define NS_TSQ_ALIGNMENT 8192
-
#define NS_TSI_SCDISVBR NS_TSR_SCDISVBR
#define ns_tsi_tmrof(ns_tsip) \
@@ -316,26 +294,22 @@ typedef struct ns_tsi
#define ns_tsi_getscqpos(ns_tsip) \
(le32_to_cpu((ns_tsip)->word_1) & 0x00007FFF)
+/* NICStAR structures located in local SRAM */
-
-/* NICStAR structures located in local SRAM ***********************************/
-
-
-
-/* RCT - Receive Connection Table
+/*
+ * RCT - Receive Connection Table
*
* Written by both the NICStAR and the device driver.
*/
-typedef struct ns_rcte
-{
- u32 word_1;
- u32 buffer_handle;
- u32 dma_address;
- u32 aal5_crc32;
+typedef struct ns_rcte {
+ u32 word_1;
+ u32 buffer_handle;
+ u32 dma_address;
+ u32 aal5_crc32;
} ns_rcte;
-#define NS_RCTE_BSFB 0x00200000 /* Rev. D only */
+#define NS_RCTE_BSFB 0x00200000 /* Rev. D only */
#define NS_RCTE_NZGFC 0x00100000
#define NS_RCTE_CONNECTOPEN 0x00080000
#define NS_RCTE_AALMASK 0x00070000
@@ -358,25 +332,21 @@ typedef struct ns_rcte
#define NS_RCT_ENTRY_SIZE 4 /* Number of dwords */
/* NOTE: We could make macros to contruct the first word of the RCTE,
- but that doesn't seem to make much sense... */
+ but that doesn't seem to make much sense... */
-
-
-/* FBD - Free Buffer Descriptor
+/*
+ * FBD - Free Buffer Descriptor
*
* Written by the device driver using via the command register.
*/
-typedef struct ns_fbd
-{
- u32 buffer_handle;
- u32 dma_address;
+typedef struct ns_fbd {
+ u32 buffer_handle;
+ u32 dma_address;
} ns_fbd;
-
-
-
-/* TST - Transmit Schedule Table
+/*
+ * TST - Transmit Schedule Table
*
* Written by the device driver.
*/
@@ -385,40 +355,38 @@ typedef u32 ns_tste;
#define NS_TST_OPCODE_MASK 0x60000000
-#define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */
-#define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */
+#define NS_TST_OPCODE_NULL 0x00000000 /* Insert null cell */
+#define NS_TST_OPCODE_FIXED 0x20000000 /* Cell from a fixed rate channel */
#define NS_TST_OPCODE_VARIABLE 0x40000000
-#define NS_TST_OPCODE_END 0x60000000 /* Jump */
+#define NS_TST_OPCODE_END 0x60000000 /* Jump */
#define ns_tste_make(opcode, sramad) (opcode | sramad)
/* NOTE:
- When the opcode is FIXED, sramad specifies the SRAM address of the
- SCD for that fixed rate channel.
+ SCD for that fixed rate channel.
- When the opcode is END, sramad specifies the SRAM address of the
- location of the next TST entry to read.
+ location of the next TST entry to read.
*/
-
-
-/* SCD - Segmentation Channel Descriptor
+/*
+ * SCD - Segmentation Channel Descriptor
*
* Written by both the device driver and the NICStAR
*/
-typedef struct ns_scd
-{
- u32 word_1;
- u32 word_2;
- u32 partial_aal5_crc;
- u32 reserved;
- ns_scqe cache_a;
- ns_scqe cache_b;
+typedef struct ns_scd {
+ u32 word_1;
+ u32 word_2;
+ u32 partial_aal5_crc;
+ u32 reserved;
+ ns_scqe cache_a;
+ ns_scqe cache_b;
} ns_scd;
-#define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */
-#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */
+#define NS_SCD_BASE_MASK_VAR 0xFFFFE000 /* Variable rate */
+#define NS_SCD_BASE_MASK_FIX 0xFFFFFC00 /* Fixed rate */
#define NS_SCD_TAIL_MASK_VAR 0x00001FF0
#define NS_SCD_TAIL_MASK_FIX 0x000003F0
#define NS_SCD_HEAD_MASK_VAR 0x00001FF0
@@ -426,13 +394,9 @@ typedef struct ns_scd
#define NS_SCD_XMITFOREVER 0x02000000
/* NOTE: There are other fields in word 2 of the SCD, but as they should
- not be needed in the device driver they are not defined here. */
-
-
-
-
-/* NICStAR local SRAM memory map **********************************************/
+ not be needed in the device driver they are not defined here. */
+/* NICStAR local SRAM memory map */
#define NS_RCT 0x00000
#define NS_RCT_32_END 0x03FFF
@@ -455,100 +419,93 @@ typedef struct ns_scd
#define NS_LGFBQ 0x1FC00
#define NS_LGFBQ_END 0x1FFFF
-
-
-/* NISCtAR operation registers ************************************************/
-
+/* NISCtAR operation registers */
/* See Section 3.4 of `IDT77211 NICStAR User Manual' from www.idt.com */
-enum ns_regs
-{
- DR0 = 0x00, /* Data Register 0 R/W*/
- DR1 = 0x04, /* Data Register 1 W */
- DR2 = 0x08, /* Data Register 2 W */
- DR3 = 0x0C, /* Data Register 3 W */
- CMD = 0x10, /* Command W */
- CFG = 0x14, /* Configuration R/W */
- STAT = 0x18, /* Status R/W */
- RSQB = 0x1C, /* Receive Status Queue Base W */
- RSQT = 0x20, /* Receive Status Queue Tail R */
- RSQH = 0x24, /* Receive Status Queue Head W */
- CDC = 0x28, /* Cell Drop Counter R/clear */
- VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */
- ICC = 0x30, /* Invalid Cell Count R/clear */
- RAWCT = 0x34, /* Raw Cell Tail R */
- TMR = 0x38, /* Timer R */
- TSTB = 0x3C, /* Transmit Schedule Table Base R/W */
- TSQB = 0x40, /* Transmit Status Queue Base W */
- TSQT = 0x44, /* Transmit Status Queue Tail R */
- TSQH = 0x48, /* Transmit Status Queue Head W */
- GP = 0x4C, /* General Purpose R/W */
- VPM = 0x50 /* VPI/VCI Mask W */
+enum ns_regs {
+ DR0 = 0x00, /* Data Register 0 R/W */
+ DR1 = 0x04, /* Data Register 1 W */
+ DR2 = 0x08, /* Data Register 2 W */
+ DR3 = 0x0C, /* Data Register 3 W */
+ CMD = 0x10, /* Command W */
+ CFG = 0x14, /* Configuration R/W */
+ STAT = 0x18, /* Status R/W */
+ RSQB = 0x1C, /* Receive Status Queue Base W */
+ RSQT = 0x20, /* Receive Status Queue Tail R */
+ RSQH = 0x24, /* Receive Status Queue Head W */
+ CDC = 0x28, /* Cell Drop Counter R/clear */
+ VPEC = 0x2C, /* VPI/VCI Lookup Error Count R/clear */
+ ICC = 0x30, /* Invalid Cell Count R/clear */
+ RAWCT = 0x34, /* Raw Cell Tail R */
+ TMR = 0x38, /* Timer R */
+ TSTB = 0x3C, /* Transmit Schedule Table Base R/W */
+ TSQB = 0x40, /* Transmit Status Queue Base W */
+ TSQT = 0x44, /* Transmit Status Queue Tail R */
+ TSQH = 0x48, /* Transmit Status Queue Head W */
+ GP = 0x4C, /* General Purpose R/W */
+ VPM = 0x50 /* VPI/VCI Mask W */
};
-
-/* NICStAR commands issued to the CMD register ********************************/
-
+/* NICStAR commands issued to the CMD register */
/* Top 4 bits are command opcode, lower 28 are parameters. */
#define NS_CMD_NO_OPERATION 0x00000000
- /* params always 0 */
+ /* params always 0 */
#define NS_CMD_OPENCLOSE_CONNECTION 0x20000000
- /* b19{1=open,0=close} b18-2{SRAM addr} */
+ /* b19{1=open,0=close} b18-2{SRAM addr} */
#define NS_CMD_WRITE_SRAM 0x40000000
- /* b18-2{SRAM addr} b1-0{burst size} */
+ /* b18-2{SRAM addr} b1-0{burst size} */
#define NS_CMD_READ_SRAM 0x50000000
- /* b18-2{SRAM addr} */
+ /* b18-2{SRAM addr} */
#define NS_CMD_WRITE_FREEBUFQ 0x60000000
- /* b0{large buf indicator} */
+ /* b0{large buf indicator} */
#define NS_CMD_READ_UTILITY 0x80000000
- /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
+ /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
#define NS_CMD_WRITE_UTILITY 0x90000000
- /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
+ /* b8{1=select UTL_CS1} b9{1=select UTL_CS0} b7-0{bus addr} */
#define NS_CMD_OPEN_CONNECTION (NS_CMD_OPENCLOSE_CONNECTION | 0x00080000)
#define NS_CMD_CLOSE_CONNECTION NS_CMD_OPENCLOSE_CONNECTION
-
-/* NICStAR configuration bits *************************************************/
-
-#define NS_CFG_SWRST 0x80000000 /* Software Reset */
-#define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */
-#define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */
-#define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */
-#define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue
- Interrupt Enable */
-#define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */
-#define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */
-#define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */
-#define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */
-#define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */
-#define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */
-#define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt
- Handling */
-#define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */
-#define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full
- Interrupt Enable */
-#define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */
-#define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt
- Enable */
-#define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */
-#define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt
- Enable */
-#define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt
- Enable */
-#define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */
-#define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full
- Interrupt Enable */
-#define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */
+/* NICStAR configuration bits */
+
+#define NS_CFG_SWRST 0x80000000 /* Software Reset */
+#define NS_CFG_RXPATH 0x20000000 /* Receive Path Enable */
+#define NS_CFG_SMBUFSIZE_MASK 0x18000000 /* Small Receive Buffer Size */
+#define NS_CFG_LGBUFSIZE_MASK 0x06000000 /* Large Receive Buffer Size */
+#define NS_CFG_EFBIE 0x01000000 /* Empty Free Buffer Queue
+ Interrupt Enable */
+#define NS_CFG_RSQSIZE_MASK 0x00C00000 /* Receive Status Queue Size */
+#define NS_CFG_ICACCEPT 0x00200000 /* Invalid Cell Accept */
+#define NS_CFG_IGNOREGFC 0x00100000 /* Ignore General Flow Control */
+#define NS_CFG_VPIBITS_MASK 0x000C0000 /* VPI/VCI Bits Size Select */
+#define NS_CFG_RCTSIZE_MASK 0x00030000 /* Receive Connection Table Size */
+#define NS_CFG_VCERRACCEPT 0x00008000 /* VPI/VCI Error Cell Accept */
+#define NS_CFG_RXINT_MASK 0x00007000 /* End of Receive PDU Interrupt
+ Handling */
+#define NS_CFG_RAWIE 0x00000800 /* Raw Cell Qu' Interrupt Enable */
+#define NS_CFG_RSQAFIE 0x00000400 /* Receive Queue Almost Full
+ Interrupt Enable */
+#define NS_CFG_RXRM 0x00000200 /* Receive RM Cells */
+#define NS_CFG_TMRROIE 0x00000080 /* Timer Roll Over Interrupt
+ Enable */
+#define NS_CFG_TXEN 0x00000020 /* Transmit Operation Enable */
+#define NS_CFG_TXIE 0x00000010 /* Transmit Status Interrupt
+ Enable */
+#define NS_CFG_TXURIE 0x00000008 /* Transmit Under-run Interrupt
+ Enable */
+#define NS_CFG_UMODE 0x00000004 /* Utopia Mode (cell/byte) Select */
+#define NS_CFG_TSQFIE 0x00000002 /* Transmit Status Queue Full
+ Interrupt Enable */
+#define NS_CFG_PHYIE 0x00000001 /* PHY Interrupt Enable */
#define NS_CFG_SMBUFSIZE_48 0x00000000
#define NS_CFG_SMBUFSIZE_96 0x08000000
@@ -579,33 +536,29 @@ enum ns_regs
#define NS_CFG_RXINT_624US 0x00003000
#define NS_CFG_RXINT_899US 0x00004000
-
-/* NICStAR STATus bits ********************************************************/
-
-#define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */
-#define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */
-#define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */
-#define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */
-#define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */
-#define NS_STAT_TMROF 0x00000800 /* Timer Overflow */
-#define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */
-#define NS_STAT_CMDBZ 0x00000200 /* Command Busy */
-#define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */
-#define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */
-#define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */
-#define NS_STAT_EOPDU 0x00000020 /* End of PDU */
-#define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */
-#define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */
-#define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */
-#define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */
+/* NICStAR STATus bits */
+
+#define NS_STAT_SFBQC_MASK 0xFF000000 /* hi 8 bits Small Buffer Queue Count */
+#define NS_STAT_LFBQC_MASK 0x00FF0000 /* hi 8 bits Large Buffer Queue Count */
+#define NS_STAT_TSIF 0x00008000 /* Transmit Status Queue Indicator */
+#define NS_STAT_TXICP 0x00004000 /* Transmit Incomplete PDU */
+#define NS_STAT_TSQF 0x00001000 /* Transmit Status Queue Full */
+#define NS_STAT_TMROF 0x00000800 /* Timer Overflow */
+#define NS_STAT_PHYI 0x00000400 /* PHY Device Interrupt */
+#define NS_STAT_CMDBZ 0x00000200 /* Command Busy */
+#define NS_STAT_SFBQF 0x00000100 /* Small Buffer Queue Full */
+#define NS_STAT_LFBQF 0x00000080 /* Large Buffer Queue Full */
+#define NS_STAT_RSQF 0x00000040 /* Receive Status Queue Full */
+#define NS_STAT_EOPDU 0x00000020 /* End of PDU */
+#define NS_STAT_RAWCF 0x00000010 /* Raw Cell Flag */
+#define NS_STAT_SFBQE 0x00000008 /* Small Buffer Queue Empty */
+#define NS_STAT_LFBQE 0x00000004 /* Large Buffer Queue Empty */
+#define NS_STAT_RSQAF 0x00000002 /* Receive Status Queue Almost Full */
#define ns_stat_sfbqc_get(stat) (((stat) & NS_STAT_SFBQC_MASK) >> 23)
#define ns_stat_lfbqc_get(stat) (((stat) & NS_STAT_LFBQC_MASK) >> 15)
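
Both accessors shift one bit short of their field's least significant bit (23 instead of 24, 15 instead of 16), so the 8-bit hardware counts come back doubled; that is consistent with the free-buffer queues being fed two buffers at a time by push_rxbufs() in nicstar.c. A quick worked example with a made-up STAT value:

u32 stat = 0x20110000;			/* hypothetical STAT readout        */
int sm = ns_stat_sfbqc_get(stat);	/* (0x20000000 >> 23) = 64 buffers  */
int lg = ns_stat_lfbqc_get(stat);	/* (0x00110000 >> 15) = 34 buffers  */
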
-
-
-/* #defines which depend on other #defines ************************************/
-
+/* #defines which depend on other #defines */
#define NS_TST0 NS_TST_FRSCD
#define NS_TST1 (NS_TST_FRSCD + NS_TST_NUM_ENTRIES + 1)
@@ -672,8 +625,7 @@ enum ns_regs
#define NS_CFG_TSQFIE_OPT 0x00000000
#endif /* ENABLE_TSQFIE */
-
-/* PCI stuff ******************************************************************/
+/* PCI stuff */
#ifndef PCI_VENDOR_ID_IDT
#define PCI_VENDOR_ID_IDT 0x111D
@@ -683,138 +635,124 @@ enum ns_regs
#define PCI_DEVICE_ID_IDT_IDT77201 0x0001
#endif /* PCI_DEVICE_ID_IDT_IDT77201 */
+/* Device driver structures */
-
-/* Device driver structures ***************************************************/
-
-
-struct ns_skb_cb {
- u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
+struct ns_skb_prv {
+ u32 buf_type; /* BUF_SM/BUF_LG/BUF_NONE */
+ u32 dma;
+ int iovcnt;
};
-#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
-
-typedef struct tsq_info
-{
- void *org;
- ns_tsi *base;
- ns_tsi *next;
- ns_tsi *last;
+#define NS_PRV_BUFTYPE(skb) \
+ (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->buf_type)
+#define NS_PRV_DMA(skb) \
+ (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->dma)
+#define NS_PRV_IOVCNT(skb) \
+ (((struct ns_skb_prv *)(ATM_SKB(skb)+1))->iovcnt)
+
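
The old ns_skb_cb/ns_skb_data overlays on skb->cb give way to struct ns_skb_prv, which the NS_PRV_* accessors locate immediately after the struct atm_skb_data that ATM_SKB() maps onto the start of the control buffer; this assumes skb->cb is large enough to hold both structures back to back. Layout and typical uses, as a sketch:

/*
 * skb->cb:  [ struct atm_skb_data ][ struct ns_skb_prv ]
 *            ^ ATM_SKB(skb)         ^ ATM_SKB(skb) + 1
 */
NS_PRV_BUFTYPE(sb) = BUF_SM;		/* tagging, as in dequeue_sm_buf()  */
if (NS_PRV_BUFTYPE(skb) == BUF_NONE)	/* checking, as in recycle_rx_buf() */
	dev_kfree_skb_any(skb);
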
+typedef struct tsq_info {
+ void *org;
+ dma_addr_t dma;
+ ns_tsi *base;
+ ns_tsi *next;
+ ns_tsi *last;
} tsq_info;
-
-typedef struct scq_info
-{
- void *org;
- ns_scqe *base;
- ns_scqe *last;
- ns_scqe *next;
- volatile ns_scqe *tail; /* Not related to the nicstar register */
- unsigned num_entries;
- struct sk_buff **skb; /* Pointer to an array of pointers
- to the sk_buffs used for tx */
- u32 scd; /* SRAM address of the corresponding
- SCD */
- int tbd_count; /* Only meaningful on variable rate */
- wait_queue_head_t scqfull_waitq;
- volatile char full; /* SCQ full indicator */
- spinlock_t lock; /* SCQ spinlock */
+typedef struct scq_info {
+ void *org;
+ dma_addr_t dma;
+ ns_scqe *base;
+ ns_scqe *last;
+ ns_scqe *next;
+ volatile ns_scqe *tail; /* Not related to the nicstar register */
+ unsigned num_entries;
+ struct sk_buff **skb; /* Pointer to an array of pointers
+ to the sk_buffs used for tx */
+ u32 scd; /* SRAM address of the corresponding
+ SCD */
+ int tbd_count; /* Only meaningful on variable rate */
+ wait_queue_head_t scqfull_waitq;
+ volatile char full; /* SCQ full indicator */
+ spinlock_t lock; /* SCQ spinlock */
} scq_info;
-
-
-typedef struct rsq_info
-{
- void *org;
- ns_rsqe *base;
- ns_rsqe *next;
- ns_rsqe *last;
+typedef struct rsq_info {
+ void *org;
+ dma_addr_t dma;
+ ns_rsqe *base;
+ ns_rsqe *next;
+ ns_rsqe *last;
} rsq_info;
-
-typedef struct skb_pool
-{
- volatile int count; /* number of buffers in the queue */
- struct sk_buff_head queue;
+typedef struct skb_pool {
+ volatile int count; /* number of buffers in the queue */
+ struct sk_buff_head queue;
} skb_pool;
/* NOTE: for small and large buffer pools, the count is not used, as the
actual value used for buffer management is the one read from the
card. */
-
-typedef struct vc_map
-{
- volatile unsigned int tx:1; /* TX vc? */
- volatile unsigned int rx:1; /* RX vc? */
- struct atm_vcc *tx_vcc, *rx_vcc;
- struct sk_buff *rx_iov; /* RX iovector skb */
- scq_info *scq; /* To keep track of the SCQ */
- u32 cbr_scd; /* SRAM address of the corresponding
- SCD. 0x00000000 for UBR/VBR/ABR */
- int tbd_count;
+typedef struct vc_map {
+ volatile unsigned int tx:1; /* TX vc? */
+ volatile unsigned int rx:1; /* RX vc? */
+ struct atm_vcc *tx_vcc, *rx_vcc;
+ struct sk_buff *rx_iov; /* RX iovector skb */
+ scq_info *scq; /* To keep track of the SCQ */
+ u32 cbr_scd; /* SRAM address of the corresponding
+ SCD. 0x00000000 for UBR/VBR/ABR */
+ int tbd_count;
} vc_map;
-
-struct ns_skb_data
-{
- struct atm_vcc *vcc;
- int iovcnt;
-};
-
-#define NS_SKB(skb) (((struct ns_skb_data *) (skb)->cb))
-
-
-typedef struct ns_dev
-{
- int index; /* Card ID to the device driver */
- int sram_size; /* In k x 32bit words. 32 or 128 */
- void __iomem *membase; /* Card's memory base address */
- unsigned long max_pcr;
- int rct_size; /* Number of entries */
- int vpibits;
- int vcibits;
- struct pci_dev *pcidev;
- struct atm_dev *atmdev;
- tsq_info tsq;
- rsq_info rsq;
- scq_info *scq0, *scq1, *scq2; /* VBR SCQs */
- skb_pool sbpool; /* Small buffers */
- skb_pool lbpool; /* Large buffers */
- skb_pool hbpool; /* Pre-allocated huge buffers */
- skb_pool iovpool; /* iovector buffers */
- volatile int efbie; /* Empty free buf. queue int. enabled */
- volatile u32 tst_addr; /* SRAM address of the TST in use */
- volatile int tst_free_entries;
- vc_map vcmap[NS_MAX_RCTSIZE];
- vc_map *tste2vc[NS_TST_NUM_ENTRIES];
- vc_map *scd2vc[NS_FRSCD_NUM];
- buf_nr sbnr;
- buf_nr lbnr;
- buf_nr hbnr;
- buf_nr iovnr;
- int sbfqc;
- int lbfqc;
- u32 sm_handle;
- u32 sm_addr;
- u32 lg_handle;
- u32 lg_addr;
- struct sk_buff *rcbuf; /* Current raw cell buffer */
- u32 rawch; /* Raw cell queue head */
- unsigned intcnt; /* Interrupt counter */
- spinlock_t int_lock; /* Interrupt lock */
- spinlock_t res_lock; /* Card resource lock */
+typedef struct ns_dev {
+ int index; /* Card ID to the device driver */
+ int sram_size; /* In k x 32bit words. 32 or 128 */
+ void __iomem *membase; /* Card's memory base address */
+ unsigned long max_pcr;
+ int rct_size; /* Number of entries */
+ int vpibits;
+ int vcibits;
+ struct pci_dev *pcidev;
+ struct idr idr;
+ struct atm_dev *atmdev;
+ tsq_info tsq;
+ rsq_info rsq;
+ scq_info *scq0, *scq1, *scq2; /* VBR SCQs */
+ skb_pool sbpool; /* Small buffers */
+ skb_pool lbpool; /* Large buffers */
+ skb_pool hbpool; /* Pre-allocated huge buffers */
+ skb_pool iovpool; /* iovector buffers */
+ volatile int efbie; /* Empty free buf. queue int. enabled */
+ volatile u32 tst_addr; /* SRAM address of the TST in use */
+ volatile int tst_free_entries;
+ vc_map vcmap[NS_MAX_RCTSIZE];
+ vc_map *tste2vc[NS_TST_NUM_ENTRIES];
+ vc_map *scd2vc[NS_FRSCD_NUM];
+ buf_nr sbnr;
+ buf_nr lbnr;
+ buf_nr hbnr;
+ buf_nr iovnr;
+ int sbfqc;
+ int lbfqc;
+ struct sk_buff *sm_handle;
+ u32 sm_addr;
+ struct sk_buff *lg_handle;
+ u32 lg_addr;
+ struct sk_buff *rcbuf; /* Current raw cell buffer */
+ struct ns_rcqe *rawcell;
+ u32 rawch; /* Raw cell queue head */
+ unsigned intcnt; /* Interrupt counter */
+ spinlock_t int_lock; /* Interrupt lock */
+ spinlock_t res_lock; /* Card resource lock */
} ns_dev;
-
/* NOTE: Each tste2vc entry relates a given TST entry to the corresponding
- CBR vc. If the entry is not allocated, it must be NULL.
-
- There are two TSTs so the driver can modify them on the fly
- without stopping the transmission.
-
- scd2vc allows us to find out unused fixed rate SCDs, because
- they must have a NULL pointer here. */
+ CBR vc. If the entry is not allocated, it must be NULL.
+
+ There are two TSTs so the driver can modify them on the fly
+ without stopping the transmission.
+ scd2vc allows us to find out unused fixed rate SCDs, because
+ they must have a NULL pointer here. */
#endif /* _LINUX_NICSTAR_H_ */
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index 842e26c45557..f594526f8c6d 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -13,15 +13,15 @@ typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
-/* This was the original definition
+/*
+ This was the original definition
#define osp_MicroDelay(microsec) \
do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
*/
#define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
udelay((useconds));}
-
-
-/* The following tables represent the timing diagrams found in
+/*
+ * The following tables represent the timing diagrams found in
* the Data Sheet for the Xicor X25020 EEProm. The #defines below
* represent the bits in the NICStAR's General Purpose register
* that must be toggled for the corresponding actions on the EEProm
@@ -31,86 +31,80 @@ typedef void __iomem *virt_addr_t;
/* Write Data To EEProm from SI line on rising edge of CLK */
/* Read Data From EEProm on falling edge of CLK */
-#define CS_HIGH 0x0002 /* Chip select high */
-#define CS_LOW 0x0000 /* Chip select low (active low)*/
-#define CLK_HIGH 0x0004 /* Clock high */
-#define CLK_LOW 0x0000 /* Clock low */
-#define SI_HIGH 0x0001 /* Serial input data high */
-#define SI_LOW 0x0000 /* Serial input data low */
+#define CS_HIGH 0x0002 /* Chip select high */
+#define CS_LOW 0x0000 /* Chip select low (active low) */
+#define CLK_HIGH 0x0004 /* Clock high */
+#define CLK_LOW 0x0000 /* Clock low */
+#define SI_HIGH 0x0001 /* Serial input data high */
+#define SI_LOW 0x0000 /* Serial input data low */
/* Read Status Register = 0000 0101b */
#if 0
-static u_int32_t rdsrtab[] =
-{
- CS_HIGH | CLK_HIGH,
- CS_LOW | CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW | SI_HIGH,
- CLK_HIGH | SI_HIGH, /* 1 */
- CLK_LOW | SI_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW | SI_HIGH,
- CLK_HIGH | SI_HIGH /* 1 */
+static u_int32_t rdsrtab[] = {
+ CS_HIGH | CLK_HIGH,
+ CS_LOW | CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW | SI_HIGH,
+ CLK_HIGH | SI_HIGH, /* 1 */
+ CLK_LOW | SI_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW | SI_HIGH,
+ CLK_HIGH | SI_HIGH /* 1 */
};
-#endif /* 0 */
-
+#endif /* 0 */
/* Read from EEPROM = 0000 0011b */
-static u_int32_t readtab[] =
-{
- /*
- CS_HIGH | CLK_HIGH,
- */
- CS_LOW | CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW,
- CLK_HIGH, /* 0 */
- CLK_LOW | SI_HIGH,
- CLK_HIGH | SI_HIGH, /* 1 */
- CLK_LOW | SI_HIGH,
- CLK_HIGH | SI_HIGH /* 1 */
+static u_int32_t readtab[] = {
+ /*
+ CS_HIGH | CLK_HIGH,
+ */
+ CS_LOW | CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW,
+ CLK_HIGH, /* 0 */
+ CLK_LOW | SI_HIGH,
+ CLK_HIGH | SI_HIGH, /* 1 */
+ CLK_LOW | SI_HIGH,
+ CLK_HIGH | SI_HIGH /* 1 */
};
-
/* Clock to read from/write to the eeprom */
-static u_int32_t clocktab[] =
-{
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW,
- CLK_HIGH,
- CLK_LOW
+static u_int32_t clocktab[] = {
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW,
+ CLK_HIGH,
+ CLK_LOW
};
-
#define NICSTAR_REG_WRITE(bs, reg, val) \
while ( readl(bs + STAT) & 0x0200 ) ; \
writel((val),(base)+(reg))
@@ -124,153 +118,131 @@ static u_int32_t clocktab[] =
* register.
*/
#if 0
-u_int32_t
-nicstar_read_eprom_status( virt_addr_t base )
+u_int32_t nicstar_read_eprom_status(virt_addr_t base)
{
- u_int32_t val;
- u_int32_t rbyte;
- int32_t i, j;
-
- /* Send read instruction */
- val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
-
- for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
- {
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | rdsrtab[i]) );
- osp_MicroDelay( CYCLE_DELAY );
- }
-
- /* Done sending instruction - now pull data off of bit 16, MSB first */
- /* Data clocked out of eeprom on falling edge of clock */
-
- rbyte = 0;
- for (i=7, j=0; i>=0; i--)
- {
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++]) );
- rbyte |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE)
- & 0x00010000) >> 16) << i);
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++]) );
- osp_MicroDelay( CYCLE_DELAY );
- }
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 );
- osp_MicroDelay( CYCLE_DELAY );
- return rbyte;
+ u_int32_t val;
+ u_int32_t rbyte;
+ int32_t i, j;
+
+ /* Send read instruction */
+ val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
+
+ for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | rdsrtab[i]));
+ osp_MicroDelay(CYCLE_DELAY);
+ }
+
+ /* Done sending instruction - now pull data off of bit 16, MSB first */
+ /* Data clocked out of eeprom on falling edge of clock */
+
+ rbyte = 0;
+ for (i = 7, j = 0; i >= 0; i--) {
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++]));
+ rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
+ & 0x00010000) >> 16) << i);
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++]));
+ osp_MicroDelay(CYCLE_DELAY);
+ }
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
+ osp_MicroDelay(CYCLE_DELAY);
+ return rbyte;
}
-#endif /* 0 */
-
+#endif /* 0 */
/*
* This routine will clock the Read_data function into the X2520
* eeprom, followed by the address to read from, through the NicSTaR's General
* Purpose register.
*/
-
-static u_int8_t
-read_eprom_byte(virt_addr_t base, u_int8_t offset)
+
+static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset)
{
- u_int32_t val = 0;
- int i,j=0;
- u_int8_t tempread = 0;
-
- val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
-
- /* Send READ instruction */
- for (i=0; i<ARRAY_SIZE(readtab); i++)
- {
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | readtab[i]) );
- osp_MicroDelay( CYCLE_DELAY );
- }
-
- /* Next, we need to send the byte address to read from */
- for (i=7; i>=0; i--)
- {
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++] | ((offset >> i) & 1) ) );
- osp_MicroDelay(CYCLE_DELAY);
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++] | ((offset >> i) & 1) ) );
- osp_MicroDelay( CYCLE_DELAY );
- }
-
- j = 0;
-
- /* Now, we can read data from the eeprom by clocking it in */
- for (i=7; i>=0; i--)
- {
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++]) );
- osp_MicroDelay( CYCLE_DELAY );
- tempread |= (((NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE )
- & 0x00010000) >> 16) << i);
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | clocktab[j++]) );
- osp_MicroDelay( CYCLE_DELAY );
- }
-
- NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE, 2 );
- osp_MicroDelay( CYCLE_DELAY );
- return tempread;
+ u_int32_t val = 0;
+ int i, j = 0;
+ u_int8_t tempread = 0;
+
+ val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
+
+ /* Send READ instruction */
+ for (i = 0; i < ARRAY_SIZE(readtab); i++) {
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | readtab[i]));
+ osp_MicroDelay(CYCLE_DELAY);
+ }
+
+ /* Next, we need to send the byte address to read from */
+ for (i = 7; i >= 0; i--) {
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++] | ((offset >> i) & 1)));
+ osp_MicroDelay(CYCLE_DELAY);
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++] | ((offset >> i) & 1)));
+ osp_MicroDelay(CYCLE_DELAY);
+ }
+
+ j = 0;
+
+ /* Now, we can read data from the eeprom by clocking it in */
+ for (i = 7; i >= 0; i--) {
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++]));
+ osp_MicroDelay(CYCLE_DELAY);
+ tempread |=
+ (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
+ & 0x00010000) >> 16) << i);
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | clocktab[j++]));
+ osp_MicroDelay(CYCLE_DELAY);
+ }
+
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
+ osp_MicroDelay(CYCLE_DELAY);
+ return tempread;
}
-
-static void
-nicstar_init_eprom( virt_addr_t base )
+static void nicstar_init_eprom(virt_addr_t base)
{
- u_int32_t val;
+ u_int32_t val;
- /*
- * turn chip select off
- */
- val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
+ /*
+ * turn chip select off
+ */
+ val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
- NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | CS_HIGH | CLK_HIGH));
- osp_MicroDelay( CYCLE_DELAY );
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | CS_HIGH | CLK_HIGH));
+ osp_MicroDelay(CYCLE_DELAY);
- NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | CS_HIGH | CLK_LOW));
- osp_MicroDelay( CYCLE_DELAY );
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | CS_HIGH | CLK_LOW));
+ osp_MicroDelay(CYCLE_DELAY);
- NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | CS_HIGH | CLK_HIGH));
- osp_MicroDelay( CYCLE_DELAY );
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | CS_HIGH | CLK_HIGH));
+ osp_MicroDelay(CYCLE_DELAY);
- NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
- (val | CS_HIGH | CLK_LOW));
- osp_MicroDelay( CYCLE_DELAY );
+ NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
+ (val | CS_HIGH | CLK_LOW));
+ osp_MicroDelay(CYCLE_DELAY);
}
-
/*
* This routine will be the interface to the ReadPromByte function
* above.
- */
+ */
static void
-nicstar_read_eprom(
- virt_addr_t base,
- u_int8_t prom_offset,
- u_int8_t *buffer,
- u_int32_t nbytes )
+nicstar_read_eprom(virt_addr_t base,
+ u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes)
{
- u_int i;
-
- for (i=0; i<nbytes; i++)
- {
- buffer[i] = read_eprom_byte( base, prom_offset );
- ++prom_offset;
- osp_MicroDelay( CYCLE_DELAY );
- }
-}
-
+ u_int i;
-/*
-void osp_MicroDelay(int x) {
-
+ for (i = 0; i < nbytes; i++) {
+ buffer[i] = read_eprom_byte(base, prom_offset);
+ ++prom_offset;
+ osp_MicroDelay(CYCLE_DELAY);
+ }
}
-*/
-
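[note] read_eprom_byte() above bit-bangs the Xicor X25020 EEPROM through the General Purpose register: it clocks the READ opcode and the byte address out, then samples one data bit per falling clock edge, MSB first. The shift-in loop in isolation, with read_data_bit() as a hypothetical stand-in for sampling bit 16 of the register:

    #include <linux/types.h>

    /* Illustrative only: assemble one byte, MSB first, from a serial
     * data line sampled once per clock cycle. */
    static u8 shift_in_byte(int (*read_data_bit)(void))
    {
            u8 byte = 0;
            int i;

            for (i = 7; i >= 0; i--)
                    byte |= (u8)(read_data_bit() & 1) << i;

            return byte;
    }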
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0b926e45afe2..a5ea1bce9689 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -215,7 +215,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(rt->u.dst.neighbour, NULL);
+ neigh_event_send(rt->dst.neighbour, NULL);
ret = -ENODATA;
if (neigh)
goto release;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index ebfb117ba68b..abd683ea326d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1364,7 +1364,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
__func__);
goto reject;
}
- dst = &rt->u.dst;
+ dst = &rt->dst;
l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
@@ -1932,7 +1932,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -EHOSTUNREACH;
goto fail3;
}
- ep->dst = &rt->u.dst;
+ ep->dst = &rt->dst;
/* get a l2t entry */
ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30ce0a8eca09..8b693c8c25e2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1364,7 +1364,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
__func__);
goto reject;
}
- dst = &rt->u.dst;
+ dst = &rt->dst;
if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
BUG_ON(!pdev);
@@ -1938,7 +1938,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -EHOSTUNREACH;
goto fail3;
}
- ep->dst = &rt->u.dst;
+ ep->dst = &rt->dst;
/* get a l2t entry */
if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 986d6f32dded..d876d0435cd4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1146,7 +1146,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
}
if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
- neigh_event_send(rt->u.dst.neighbour, NULL);
+ neigh_event_send(rt->dst.neighbour, NULL);
ip_rt_put(rt);
return rc;
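[note] The infiniband hunks above (addr.c, cxgb3/cxgb4 cm.c, nes_cm.c) are one mechanical rename: struct rtable's embedded dst_entry is no longer reached through the union member u, so every rt->u.dst access becomes rt->dst. Purely as illustration against the post-rename headers, with the old form kept in a comment:

    #include <net/route.h>

    /* Post-rename accessor; the pre-rename equivalent is shown inline. */
    static struct dst_entry *route_dst(struct rtable *rt)
    {
            /* old layout: return &rt->u.dst; */
            return &rt->dst;
    }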
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index f37b8f68d0aa..8c46baee621b 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -449,14 +449,9 @@ static int get_filter(void __user *arg, struct sock_filter **p)
/* uprog.len is unsigned short, so no overflow here */
len = uprog.len * sizeof(struct sock_filter);
- code = kmalloc(len, GFP_KERNEL);
- if (code == NULL)
- return -ENOMEM;
-
- if (copy_from_user(code, uprog.filter, len)) {
- kfree(code);
- return -EFAULT;
- }
+ code = memdup_user(uprog.filter, len);
+ if (IS_ERR(code))
+ return PTR_ERR(code);
err = sk_chk_filter(code, uprog.len);
if (err) {
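[note] The isdn hunks here and below replace the kmalloc()+copy_from_user() pair with memdup_user(), which allocates and copies in one step and reports failure as an ERR_PTR() value rather than NULL, hence the IS_ERR()/PTR_ERR() handling. A minimal caller, as a sketch (the function name is illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int copy_blob_from_user(const void __user *ubuf, size_t len)
    {
            void *kbuf;

            kbuf = memdup_user(ubuf, len);   /* kmalloc + copy_from_user */
            if (IS_ERR(kbuf))
                    return PTR_ERR(kbuf);    /* -ENOMEM or -EFAULT */

            /* ... use kbuf ... */
            kfree(kbuf);
            return 0;
    }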
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 123c1d6c43b4..1507d2e83fbb 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -411,14 +411,10 @@ static int pcbit_writecmd(const u_char __user *buf, int len, int driver, int cha
return -EINVAL;
}
- cbuf = kmalloc(len, GFP_KERNEL);
- if (!cbuf)
- return -ENOMEM;
+ cbuf = memdup_user(buf, len);
+ if (IS_ERR(cbuf))
+ return PTR_ERR(cbuf);
- if (copy_from_user(cbuf, buf, len)) {
- kfree(cbuf);
- return -EFAULT;
- }
memcpy_toio(dev->sh_mem, cbuf, len);
kfree(cbuf);
return len;
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index 1081091bbfaf..43c5dc3516e5 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -215,19 +215,13 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: DCBIOSETSPID: ioctl received\n",
sc_adapter[card]->devicename);
- spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
- if(!spid) {
- kfree(rcvmsg);
- return -ENOMEM;
- }
-
/*
* Get the spid from user space
*/
- if (copy_from_user(spid, data->dataptr, SCIOC_SPIDSIZE)) {
+ spid = memdup_user(data->dataptr, SCIOC_SPIDSIZE);
+ if (IS_ERR(spid)) {
kfree(rcvmsg);
- kfree(spid);
- return -EFAULT;
+ return PTR_ERR(spid);
}
pr_debug("%s: SCIOCSETSPID: setting channel %d spid to %s\n",
@@ -296,18 +290,13 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: SCIOSETDN: ioctl received\n",
sc_adapter[card]->devicename);
- dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
- if (!dn) {
- kfree(rcvmsg);
- return -ENOMEM;
- }
/*
* Get the spid from user space
*/
- if (copy_from_user(dn, data->dataptr, SCIOC_DNSIZE)) {
+ dn = memdup_user(data->dataptr, SCIOC_DNSIZE);
+ if (IS_ERR(dn)) {
kfree(rcvmsg);
- kfree(dn);
- return -EFAULT;
+ return PTR_ERR(dn);
}
pr_debug("%s: SCIOCSETDN: setting channel %d dn to %s\n",
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index 75e28fef797b..d693b8d15cde 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -34,7 +34,7 @@ struct mc32_mailbox
{
u16 mbox;
u16 data[1];
-} __attribute((packed));
+} __packed;
struct skb_header
{
@@ -43,7 +43,7 @@ struct skb_header
u16 next; /* Do not change! */
u16 length;
u32 data;
-} __attribute((packed));
+} __packed;
struct mc32_stats
{
@@ -68,7 +68,7 @@ struct mc32_stats
u32 dataA[6];
u16 dataB[5];
u32 dataC[14];
-} __attribute((packed));
+} __packed;
#define STATUS_MASK 0x0F
#define COMPLETED (1<<7)
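[note] The 3c527.h hunks (like the 8139cp.c and atl1.h ones further down) only swap the spelled-out attribute for the kernel's __packed shorthand, which the compiler headers expand to __attribute__((packed)); layout and behaviour are unchanged. A small illustration of what the attribute does:

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Without __packed the compiler would pad 'mbox' so that 'data' is
     * 4-byte aligned (sizeof == 8); with it the on-wire layout is kept
     * (sizeof == 6). */
    struct demo_hdr {
            u16 mbox;
            u32 data;
    } __packed;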
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 284a5f4a63ac..4a4f6b81e32d 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -322,7 +322,7 @@ struct cp_dma_stats {
__le32 rx_ok_mcast;
__le16 tx_abort;
__le16 tx_underrun;
-} __attribute__((packed));
+} __packed;
struct cp_extra_stats {
unsigned long rx_frags;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 97d8068b372b..f5166dccd8df 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -662,7 +662,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops;
/* read MMIO register */
#define RTL_R8(reg) ioread8 (ioaddr + (reg))
#define RTL_R16(reg) ioread16 (ioaddr + (reg))
-#define RTL_R32(reg) ((unsigned long) ioread32 (ioaddr + (reg)))
+#define RTL_R32(reg) ioread32 (ioaddr + (reg))
static const u16 rtl8139_intr_mask =
@@ -862,7 +862,7 @@ retry:
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
i = 0;
dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
- dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
+ dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
tp->chipset = 0;
match:
@@ -1643,7 +1643,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
- netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+ netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
i, RTL_R32(TxStatus0 + (i * 4)),
i == tp->dirty_tx % NUM_TX_DESC ?
" (queue head)" : "");
@@ -2487,7 +2487,7 @@ static void __set_rx_mode (struct net_device *dev)
int rx_mode;
u32 tmp;
- netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
+ netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
dev->flags, RTL_R32(RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
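[note] With the unsigned-long cast dropped from RTL_R32, the macro now yields the u32 that ioread32() returns, so the debug format strings change from %08lx to %08x, keeping printk format checking quiet and the output correct on 64-bit. Sketch (the register offset is illustrative only):

    #include <linux/io.h>
    #include <linux/netdevice.h>

    static void demo_dump_reg(struct net_device *dev, void __iomem *ioaddr)
    {
            u32 val = ioread32(ioaddr + 0x40);      /* illustrative offset */

            netdev_dbg(dev, "TxConfig = 0x%08x\n", val);
    }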
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ce2fcdd4ab90..71e6f8fc0cf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1659,6 +1659,7 @@ config R6040
depends on NET_PCI && PCI
select CRC32
select MII
+ select PHYLIB
help
This is a driver for the R6040 Fast Ethernet MACs found in the
the RDC R-321x System-on-chips.
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 355797f70048..42fce91b71fc 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -37,69 +37,6 @@
#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
-
-static void rx(struct net_device *dev, int bufnum,
- struct archdr *pkthdr, int length);
-static int build_header(struct sk_buff *skb,
- struct net_device *dev,
- unsigned short type,
- uint8_t daddr);
-static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
- int bufnum);
-static int ack_tx(struct net_device *dev, int acked);
-
-
-static struct ArcProto capmode_proto =
-{
- 'r',
- XMTU,
- 0,
- rx,
- build_header,
- prepare_tx,
- NULL,
- ack_tx
-};
-
-
-static void arcnet_cap_init(void)
-{
- int count;
-
- for (count = 1; count <= 8; count++)
- if (arc_proto_map[count] == arc_proto_default)
- arc_proto_map[count] = &capmode_proto;
-
- /* for cap mode, we only set the bcast proto if there's no better one */
- if (arc_bcast_proto == arc_proto_default)
- arc_bcast_proto = &capmode_proto;
-
- arc_proto_default = &capmode_proto;
- arc_raw_proto = &capmode_proto;
-}
-
-
-#ifdef MODULE
-
-static int __init capmode_module_init(void)
-{
- printk(VERSION);
- arcnet_cap_init();
- return 0;
-}
-
-static void __exit capmode_module_exit(void)
-{
- arcnet_unregister_proto(&capmode_proto);
-}
-module_init(capmode_module_init);
-module_exit(capmode_module_exit);
-
-MODULE_LICENSE("GPL");
-#endif /* MODULE */
-
-
-
/* packet receiver */
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length)
@@ -231,65 +168,107 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
length,ofs);
- // Copy the arcnet-header + the protocol byte down:
+ /* Copy the arcnet-header + the protocol byte down: */
lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
sizeof(pkt->soft.cap.proto));
- // Skip the extra integer we have written into it as a cookie
- // but write the rest of the message:
+ /* Skip the extra integer we have written into it as a cookie
+ but write the rest of the message: */
lp->hw.copy_to_card(dev, bufnum, ofs+1,
((unsigned char*)&pkt->soft.cap.mes),length-1);
lp->lastload_dest = hard->dest;
- return 1; /* done */
+ return 1; /* done */
}
-
static int ack_tx(struct net_device *dev, int acked)
{
- struct arcnet_local *lp = netdev_priv(dev);
- struct sk_buff *ackskb;
- struct archdr *ackpkt;
- int length=sizeof(struct arc_cap);
+ struct arcnet_local *lp = netdev_priv(dev);
+ struct sk_buff *ackskb;
+ struct archdr *ackpkt;
+ int length=sizeof(struct arc_cap);
- BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
- lp->outgoing.skb->protocol, acked);
+ BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
+ lp->outgoing.skb->protocol, acked);
- BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
- /* Now alloc a skb to send back up through the layers: */
- ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
- if (ackskb == NULL) {
- BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
- goto free_outskb;
- }
+ /* Now alloc a skb to send back up through the layers: */
+ ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
+ if (ackskb == NULL) {
+ BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
+ goto free_outskb;
+ }
+
+ skb_put(ackskb, length + ARC_HDR_SIZE );
+ ackskb->dev = dev;
+
+ skb_reset_mac_header(ackskb);
+ ackpkt = (struct archdr *)skb_mac_header(ackskb);
+ /* skb_pull(ackskb, ARC_HDR_SIZE); */
- skb_put(ackskb, length + ARC_HDR_SIZE );
- ackskb->dev = dev;
+ skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
+ ARC_HDR_SIZE + sizeof(struct arc_cap));
+ ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
+ ackpkt->soft.cap.mes.ack=acked;
- skb_reset_mac_header(ackskb);
- ackpkt = (struct archdr *)skb_mac_header(ackskb);
- /* skb_pull(ackskb, ARC_HDR_SIZE); */
+ BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
+ *((int*)&ackpkt->soft.cap.cookie[0]));
+ ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
- skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
- ARC_HDR_SIZE + sizeof(struct arc_cap));
- ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
- ackpkt->soft.cap.mes.ack=acked;
+ BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
+ netif_rx(ackskb);
- BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
- *((int*)&ackpkt->soft.cap.cookie[0]));
+free_outskb:
+ dev_kfree_skb_irq(lp->outgoing.skb);
+ lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
- ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
+ return 0;
+}
- BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
- netif_rx(ackskb);
+static struct ArcProto capmode_proto =
+{
+ 'r',
+ XMTU,
+ 0,
+ rx,
+ build_header,
+ prepare_tx,
+ NULL,
+ ack_tx
+};
- free_outskb:
- dev_kfree_skb_irq(lp->outgoing.skb);
- lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
+static void arcnet_cap_init(void)
+{
+ int count;
- return 0;
+ for (count = 1; count <= 8; count++)
+ if (arc_proto_map[count] == arc_proto_default)
+ arc_proto_map[count] = &capmode_proto;
+
+ /* for cap mode, we only set the bcast proto if there's no better one */
+ if (arc_bcast_proto == arc_proto_default)
+ arc_bcast_proto = &capmode_proto;
+
+ arc_proto_default = &capmode_proto;
+ arc_raw_proto = &capmode_proto;
}
+
+static int __init capmode_module_init(void)
+{
+ printk(VERSION);
+ arcnet_cap_init();
+ return 0;
+}
+
+static void __exit capmode_module_exit(void)
+{
+ arcnet_unregister_proto(&capmode_proto);
+}
+module_init(capmode_module_init);
+module_exit(capmode_module_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 24df0325090c..ee2f8425dbe7 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev)
struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
+ static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < ETH_ALEN; i++) {
+ __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
+ __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
+ }
+ __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+ &port->regs->rx_control[0]);
+ return;
+ }
if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
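[note] The ixp4xx hunk adds an IFF_ALLMULTI branch ahead of the existing promiscuous/multicast-list handling: programming address and mask as 01:00:00:00:00:00 makes the hardware filter match any frame with the multicast bit set. The usual rx-mode decision ladder it slots into, as a sketch where the hw_* helpers are hypothetical stand-ins for the driver's register writes:

    #include <linux/netdevice.h>

    static void demo_set_rx_mode(struct net_device *dev)
    {
            if (dev->flags & IFF_PROMISC)
                    hw_accept_all(dev);             /* hypothetical helper */
            else if (dev->flags & IFF_ALLMULTI)
                    hw_accept_all_multicast(dev);   /* hypothetical helper */
            else if (!netdev_mc_empty(dev))
                    hw_program_mc_list(dev);        /* hypothetical helper */
            else
                    hw_accept_unicast_only(dev);    /* hypothetical helper */
    }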
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 84ae905bf732..52abbbdf8a08 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -73,7 +73,8 @@
#define FULL_DUPLEX 2
#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MAX_JUMBO_FRAME_SIZE (9*1024)
+#define MAX_JUMBO_FRAME_SIZE (6*1024)
+#define MAX_TSO_FRAME_SIZE (7*1024)
#define MAX_TX_OFFLOAD_THRESH (9*1024)
#define AT_MAX_RECEIVE_QUEUE 4
@@ -87,10 +88,11 @@
#define AT_MAX_INT_WORK 5
#define AT_TWSI_EEPROM_TIMEOUT 100
#define AT_HW_MAX_IDLE_DELAY 10
-#define AT_SUSPEND_LINK_TIMEOUT 28
+#define AT_SUSPEND_LINK_TIMEOUT 100
#define AT_ASPM_L0S_TIMER 6
#define AT_ASPM_L1_TIMER 12
+#define AT_LCKDET_TIMER 12
#define ATL1C_PCIE_L0S_L1_DISABLE 0x01
#define ATL1C_PCIE_PHY_RESET 0x02
@@ -316,6 +318,7 @@ enum atl1c_nic_type {
athr_l2c_b,
athr_l2c_b2,
athr_l1d,
+ athr_l1d_2,
};
enum atl1c_trans_queue {
@@ -392,6 +395,8 @@ struct atl1c_hw {
u16 subsystem_id;
u16 subsystem_vendor_id;
u8 revision_id;
+ u16 phy_id1;
+ u16 phy_id2;
u32 intr_mask;
u8 dmaw_dly_cnt;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index f1389d664a21..d8501f060957 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
if (data & TWSI_DEBUG_DEV_EXIST)
return 1;
+ AT_READ_REG(hw, REG_MASTER_CTRL, &data);
+ if (data & MASTER_CTRL_OTP_SEL)
+ return 1;
return 0;
}
@@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
u32 i;
u32 otp_ctrl_data;
u32 twsi_ctrl_data;
+ u32 ltssm_ctrl_data;
+ u32 wol_data;
u8 eth_addr[ETH_ALEN];
u16 phy_data;
bool raise_vol = false;
@@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
udelay(20);
raise_vol = true;
}
+ /* close open bit of ReadOnly*/
+ AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
+ ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
+ AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
+
+ /* clear any WOL settings */
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+ AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
+
AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
}
/* Disable OTP_CLK */
if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
- if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
- otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
- }
+ otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ msleep(1);
}
if (raise_vol) {
if (hw->nic_type == athr_l2c_b ||
hw->nic_type == athr_l2c_b2 ||
- hw->nic_type == athr_l1d) {
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2) {
atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
goto out;
@@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
if (hw->nic_type == athr_l2c_b ||
hw->nic_type == athr_l2c_b2 ||
- hw->nic_type == athr_l1d) {
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2) {
atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
msleep(20);
}
-
- /*Enable PHY LinkChange Interrupt */
+ if (hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
+ }
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
+ || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+ }
err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
if (err) {
if (netif_msg_hw(adapter))
@@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw)
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
- u16 phy_id1, phy_id2;
- if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) ||
- (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) {
- if (netif_msg_link(adapter))
- dev_err(&pdev->dev, "Error get phy ID\n");
+ if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
+ (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
+ dev_err(&pdev->dev, "Error get phy ID\n");
return -1;
}
switch (hw->media_type) {
@@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
return 0;
}
+int atl1c_phy_power_saving(struct atl1c_hw *hw)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ int ret = 0;
+ u16 autoneg_advertised = ADVERTISED_10baseT_Half;
+ u16 save_autoneg_advertised;
+ u16 phy_data;
+ u16 mii_lpa_data;
+ u16 speed = SPEED_0;
+ u16 duplex = FULL_DUPLEX;
+ int i;
+
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (phy_data & BMSR_LSTATUS) {
+ atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
+ if (mii_lpa_data & LPA_10FULL)
+ autoneg_advertised = ADVERTISED_10baseT_Full;
+ else if (mii_lpa_data & LPA_10HALF)
+ autoneg_advertised = ADVERTISED_10baseT_Half;
+ else if (mii_lpa_data & LPA_100HALF)
+ autoneg_advertised = ADVERTISED_100baseT_Half;
+ else if (mii_lpa_data & LPA_100FULL)
+ autoneg_advertised = ADVERTISED_100baseT_Full;
+
+ save_autoneg_advertised = hw->autoneg_advertised;
+ hw->phy_configured = false;
+ hw->autoneg_advertised = autoneg_advertised;
+ if (atl1c_restart_autoneg(hw) != 0) {
+ dev_dbg(&pdev->dev, "phy autoneg failed\n");
+ ret = -1;
+ }
+ hw->autoneg_advertised = save_autoneg_advertised;
+
+ if (mii_lpa_data) {
+ for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+ mdelay(100);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (phy_data & BMSR_LSTATUS) {
+ if (atl1c_get_speed_and_duplex(hw, &speed,
+ &duplex) != 0)
+ dev_dbg(&pdev->dev,
+ "get speed and duplex failed\n");
+ break;
+ }
+ }
+ }
+ } else {
+ speed = SPEED_10;
+ duplex = HALF_DUPLEX;
+ }
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+
+ return ret;
+}
+
int atl1c_restart_autoneg(struct atl1c_hw *hw)
{
int err = 0;
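[note] atl1c_phy_power_saving(), added above, reads the partner's MII_LPA and re-advertises only the slowest speed it reported before the device suspends, so a wake-on-link can be kept alive at minimal power; with no link it simply assumes 10M half duplex. The selection step in isolation, mirroring the ordering used above (illustrative helper, not driver code):

    #include <linux/ethtool.h>
    #include <linux/mii.h>

    static u16 suspend_advertisement(u16 lpa)
    {
            if (lpa & LPA_10FULL)
                    return ADVERTISED_10baseT_Full;
            if (lpa & LPA_10HALF)
                    return ADVERTISED_10baseT_Half;
            if (lpa & LPA_100HALF)
                    return ADVERTISED_100baseT_Half;
            if (lpa & LPA_100FULL)
                    return ADVERTISED_100baseT_Full;
            return ADVERTISED_10baseT_Half;  /* no link: assume 10M half */
    }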
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 1eeb3ed9f0cb..3dd675979aa1 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
int atl1c_phy_init(struct atl1c_hw *hw);
int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
int atl1c_restart_autoneg(struct atl1c_hw *hw);
-
+int atl1c_phy_power_saving(struct atl1c_hw *hw);
/* register definition */
#define REG_DEVICE_CAP 0x5C
#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
@@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define REG_PCIE_PHYMISC 0x1000
#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
+#define REG_PCIE_PHYMISC2 0x1004
+#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3
+#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16
+#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3
+#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18
+
#define REG_TWSI_DEBUG 0x1108
#define TWSI_DEBUG_DEV_EXIST 0x20000000
@@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define PM_CTRL_ASPM_L0S_EN 0x00001000
#define PM_CTRL_CLK_SWH_L1 0x00002000
#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
-#define PM_CTRL_PCIE_RECV 0x00008000
+#define PM_CTRL_RCVR_WT_TIMER 0x00008000
#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
-#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
+#define PM_CTRL_LCKDET_TIMER_MASK 0xF
#define PM_CTRL_LCKDET_TIMER_SHIFT 24
#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
#define PM_CTRL_SA_DLY_EN 0x20000000
#define PM_CTRL_MAC_ASPM_CHK 0x40000000
#define PM_CTRL_HOTRST 0x80000000
+#define REG_LTSSM_ID_CTRL 0x12FC
+#define LTSSM_ID_EN_WRO 0x1000
/* Selene Master Control Register */
#define REG_MASTER_CTRL 0x1400
#define MASTER_CTRL_SOFT_RST 0x1
#define MASTER_CTRL_TEST_MODE_MASK 0x3
#define MASTER_CTRL_TEST_MODE_SHIFT 2
#define MASTER_CTRL_BERT_START 0x10
+#define MASTER_CTRL_OOB_DIS_OFF 0x40
+#define MASTER_CTRL_SA_TIMER_EN 0x80
#define MASTER_CTRL_MTIMER_EN 0x100
#define MASTER_CTRL_MANUAL_INT 0x200
#define MASTER_CTRL_TX_ITIMER_EN 0x400
@@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
GPHY_CTRL_PWDOWN_HW |\
GPHY_CTRL_PHY_IDDQ)
+#define GPHY_CTRL_POWER_SAVING ( \
+ GPHY_CTRL_SEL_ANA_RST |\
+ GPHY_CTRL_HIB_EN |\
+ GPHY_CTRL_HIB_PULSE |\
+ GPHY_CTRL_PWDOWN_HW |\
+ GPHY_CTRL_PHY_IDDQ)
/* Block IDLE Status Register */
#define REG_IDLE_STATUS 0x1410
#define IDLE_STATUS_MASK 0x00FF
@@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
* comes from Analog SerDes */
#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
+#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
+#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3
+#define SERDES_OVCLK_18_25 0x0
+#define SERDES_OVCLK_12_18 0x1
+#define SERDES_OVCLK_0_4 0x2
+#define SERDES_OVCLK_4_12 0x3
+#define SERDES_MAC_CLK_SLOWDOWN 0x20000
+#define SERDES_PYH_CLK_SLOWDOWN 0x40000
/* MAC Control Register */
#define REG_MAC_CTRL 0x1480
@@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define REG_MAC_TX_STATUS_BIN 0x1760
#define REG_MAC_TX_STATUS_END 0x17c0
+#define REG_CLK_GATING_CTRL 0x1814
+#define CLK_GATING_DMAW_EN 0x0001
+#define CLK_GATING_DMAR_EN 0x0002
+#define CLK_GATING_TXQ_EN 0x0004
+#define CLK_GATING_RXQ_EN 0x0008
+#define CLK_GATING_TXMAC_EN 0x0010
+#define CLK_GATING_RXMAC_EN 0x0020
+
+#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\
+ CLK_GATING_DMAR_EN |\
+ CLK_GATING_TXQ_EN |\
+ CLK_GATING_RXQ_EN |\
+ CLK_GATING_TXMAC_EN|\
+ CLK_GATING_RXMAC_EN)
+
/* DEBUG ADDR */
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
@@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define MII_PHYSID1 0x02
#define MII_PHYSID2 0x03
+#define L1D_MPW_PHYID1 0xD01C /* V7 */
+#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
+#define L1D_MPW_PHYID3 0xD01E /* V8 */
+
/* Autoneg Advertisement Register */
#define MII_ADVERTISE 0x04
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1c3c046d5f34..c7b8ef507ebd 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
@@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
-
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
#define L2CB_V10 0xc0
#define L2CB_V11 0xc1
@@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+static void atl1c_pcie_patch(struct atl1c_hw *hw)
+{
+ u32 data;
+ AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+ data |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+
+ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
+ AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
+
+ data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
+ PCIE_PHYMISC2_SERDES_CDR_SHIFT);
+ data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
+ data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
+ PCIE_PHYMISC2_SERDES_TH_SHIFT);
+ data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+ }
+}
+
+/* FIXME: no need any more ? */
/*
* atl1c_init_pcie - init PCIE module
*/
@@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
data &= ~PCIE_UC_SERVRITY_FCP;
AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
+ AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
+ data &= ~LTSSM_ID_EN_WRO;
+ AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
+
+ atl1c_pcie_patch(hw);
if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
atl1c_disable_l0s_l1(hw);
if (flag & ATL1C_PCIE_PHY_RESET)
@@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
AT_WRITE_REG(hw, REG_GPHY_CTRL,
GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
- msleep(1);
+ msleep(5);
}
/*
@@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
AT_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
@@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
- if (netif_carrier_ok(netdev)) {
- hw->hibernate = true;
- if (atl1c_stop_mac(hw) != 0)
- if (netif_msg_hw(adapter))
- dev_warn(&pdev->dev,
- "stop mac failed\n");
- atl1c_set_aspm(hw, false);
- }
+ hw->hibernate = true;
+ if (atl1c_stop_mac(hw) != 0)
+ if (netif_msg_hw(adapter))
+ dev_warn(&pdev->dev, "stop mac failed\n");
+ atl1c_set_aspm(hw, false);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ atl1c_phy_reset(hw);
+ atl1c_phy_init(&adapter->hw);
} else {
/* Link Up */
hw->hibernate = false;
@@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work)
netdev = adapter->netdev;
if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
+ adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
netif_device_detach(netdev);
atl1c_down(adapter);
atl1c_up(adapter);
@@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work)
return;
}
- if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
+ if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
+ adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
atl1c_check_link_status(adapter);
+ }
+ return;
}
@@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
+ if (new_mtu > MAX_TSO_FRAME_SIZE) {
+ adapter->netdev->features &= ~NETIF_F_TSO;
+ adapter->netdev->features &= ~NETIF_F_TSO6;
+ } else {
+ adapter->netdev->features |= NETIF_F_TSO;
+ adapter->netdev->features |= NETIF_F_TSO6;
+ }
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
@@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
case PCI_DEVICE_ID_ATHEROS_L1D:
hw->nic_type = athr_l1d;
break;
+ case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
+ hw->nic_type = athr_l1d_2;
+ break;
default:
break;
}
@@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
- hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ |
- ATL1C_INTR_MODRT_ENABLE |
- ATL1C_RX_IPV6_CHKSUM |
+ hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
ATL1C_TXQ_MODE_ENHANCE;
if (link_ctrl_data & LINK_CTRL_L0S_EN)
hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
@@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+ hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
if (hw->nic_type == athr_l1c ||
- hw->nic_type == athr_l1d) {
- hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l1d_2)
hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
- }
return 0;
}
/*
@@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ u32 revision;
+
adapter->wol = 0;
adapter->link_speed = SPEED_0;
@@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
-
+ AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+ hw->revision_id = revision & 0xFF;
/* before link up, we assume hibernate is true */
hw->hibernate = true;
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
@@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
int i;
+ u32 data;
/* TPD */
AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
@@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
+ if (hw->nic_type == athr_l2c_b) {
+ AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
+ AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
+ AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
+ AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
+ AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
+ AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
+ AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
+ AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
+ }
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
+ /* Power Saving for L2c_B */
+ AT_READ_REG(hw, REG_SERDES_LOCK, &data);
+ data |= SERDES_MAC_CLK_SLOWDOWN;
+ data |= SERDES_PYH_CLK_SLOWDOWN;
+ AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
+ }
/* Load all of base address above */
AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
}
@@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
u16 tx_offload_thresh;
u32 txq_ctrl_data;
u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
+ u32 max_pay_load_data;
extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
@@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
TXQ_NUM_TPD_BURST_SHIFT;
if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
- txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] &
+ max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
+ max_pay_load_data >>= 1;
+ txq_ctrl_data |= max_pay_load_data;
AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
}
@@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
RSS_HASH_BITS_SHIFT;
if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
- rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M &
+ rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
@@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
- int ret;
+ u32 master_ctrl_data = 0;
AT_WRITE_REG(hw, REG_IMR, 0);
AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
- ret = atl1c_stop_mac(hw);
- if (ret)
- return ret;
+ atl1c_stop_mac(hw);
/*
* Issue Soft Reset to the MAC. This will reset the chip's
* transmit, receive, DMA. It will not effect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
+ AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+ master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
+ AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
+ & 0xFFFF));
+
AT_WRITE_FLUSH(hw);
msleep(10);
/* Wait at least 10ms for All module to be Idle */
@@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
{
u32 pm_ctrl_data;
u32 link_ctrl_data;
+ u32 link_l1_timer = 0xF;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
- pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
- PM_CTRL_LCKDET_TIMER_SHIFT);
-
- pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
- pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
- pm_ctrl_data |= PM_CTRL_RBER_EN;
- pm_ctrl_data |= PM_CTRL_SDES_EN;
+ PM_CTRL_LCKDET_TIMER_SHIFT);
+ pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
- if (hw->nic_type == athr_l2c_b ||
- hw->nic_type == athr_l1d ||
- hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
- if (hw->nic_type == athr_l2c_b &&
- hw->revision_id == L2CB_V10)
+ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
link_ctrl_data |= LINK_CTRL_EXT_SYNC;
}
AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
- pm_ctrl_data |= PM_CTRL_PCIE_RECV;
- pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
- pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+ pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
+ pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
+ PM_CTRL_PM_REQ_TIMER_SHIFT);
+ pm_ctrl_data |= AT_ASPM_L1_TIMER <<
+ PM_CTRL_PM_REQ_TIMER_SHIFT;
pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
pm_ctrl_data &= ~PM_CTRL_HOTRST;
pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
}
-
+ pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
if (linkup) {
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
@@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
- if (hw->nic_type == athr_l2c_b ||
- hw->nic_type == athr_l1d ||
- hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
if (hw->nic_type == athr_l2c_b)
if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
- pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
- if (hw->adapter->link_speed == SPEED_100 ||
- hw->adapter->link_speed == SPEED_1000) {
- pm_ctrl_data &=
- ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
- PM_CTRL_L1_ENTRY_TIMER_SHIFT);
- if (hw->nic_type == athr_l1d)
- pm_ctrl_data |= 0xF <<
- PM_CTRL_L1_ENTRY_TIMER_SHIFT;
- else
- pm_ctrl_data |= 7 <<
- PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ if (hw->adapter->link_speed == SPEED_100 ||
+ hw->adapter->link_speed == SPEED_1000) {
+ pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ if (hw->nic_type == athr_l2c_b)
+ link_l1_timer = 7;
+ else if (hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d_2)
+ link_l1_timer = 4;
+ pm_ctrl_data |= link_l1_timer <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
}
} else {
pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
@@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
- }
- atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
- if (hw->adapter->link_speed == SPEED_10)
- if (hw->nic_type == athr_l1d)
- atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
- else
- atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
- else if (hw->adapter->link_speed == SPEED_100)
- atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
- else
- atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
+ }
} else {
- pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-
pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
@@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
else
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
}
-
AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+
+ return;
}
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
@@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
- if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d_2) {
mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
}
@@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
u32 master_ctrl_data = 0;
u32 intr_modrt_data;
+ u32 data;
/* clear interrupt status */
AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
@@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
* HW will enable self to assert interrupt event to system after
* waiting x-time for software to notify it accept interrupt.
*/
+
+ data = CLK_GATING_EN_ALL;
+ if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
+ if (hw->nic_type == athr_l2c_b)
+ data &= ~CLK_GATING_RXMAC_EN;
+ } else
+ data = 0;
+ AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
+
AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
hw->ict & INT_RETRIG_TIMER_MASK);
@@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
+ master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
@@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data)
"atl1c hardware error (status = 0x%x)\n",
status & ISR_ERROR);
/* reset MAC */
- hw->intr_mask &= ~ISR_ERROR;
- AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
adapter->work_event |= ATL1C_WORK_EVENT_RESET;
schedule_work(&adapter->common_task);
- break;
+ return IRQ_HANDLED;
}
if (status & ISR_OVER)
@@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter)
napi_disable(&adapter->napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
- AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
/* reset MAC to disable all RX/TX */
atl1c_reset_mac(&adapter->hw);
msleep(1);
@@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
- u32 ctrl;
- u32 mac_ctrl_data;
- u32 master_ctrl_data;
+ u32 mac_ctrl_data = 0;
+ u32 master_ctrl_data = 0;
u32 wol_ctrl_data = 0;
- u16 mii_bmsr_data;
- u16 save_autoneg_advertised;
- u16 mii_intr_status_data;
+ u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol;
- u32 i;
int retval = 0;
+ atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
atl1c_down(adapter);
}
netif_device_detach(netdev);
- atl1c_disable_l0s_l1(hw);
retval = pci_save_state(pdev);
if (retval)
return retval;
+
+ if (wufc)
+ if (atl1c_phy_power_saving(hw) != 0)
+ dev_dbg(&pdev->dev, "phy power saving failed");
+
+ AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+ master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+ mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
+ mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) <<
+ MAC_CTRL_PRMLEN_SHIFT);
+ mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
+ mac_ctrl_data &= ~MAC_CTRL_DUPLX;
+
if (wufc) {
- AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
- master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
-
- /* get link status */
- atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- save_autoneg_advertised = hw->autoneg_advertised;
- hw->autoneg_advertised = ADVERTISED_10baseT_Half;
- if (atl1c_restart_autoneg(hw) != 0)
- if (netif_msg_link(adapter))
- dev_warn(&pdev->dev, "phy autoneg failed\n");
- hw->phy_configured = false; /* re-init PHY when resume */
- hw->autoneg_advertised = save_autoneg_advertised;
+ mac_ctrl_data |= MAC_CTRL_RX_EN;
+ if (adapter->link_speed == SPEED_1000 ||
+ adapter->link_speed == SPEED_0) {
+ mac_ctrl_data |= atl1c_mac_speed_1000 <<
+ MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+ } else
+ mac_ctrl_data |= atl1c_mac_speed_10_100 <<
+ MAC_CTRL_SPEED_SHIFT;
+
+ if (adapter->link_duplex == DUPLEX_FULL)
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+
/* turn on magic packet wol */
if (wufc & AT_WUFC_MAG)
- wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+ wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
if (wufc & AT_WUFC_LNKC) {
- for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
- msleep(100);
- atl1c_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
- if (mii_bmsr_data & BMSR_LSTATUS)
- break;
- }
- if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
- if (netif_msg_link(adapter))
- dev_warn(&pdev->dev,
- "%s: Link may change"
- "when suspend\n",
- atl1c_driver_name);
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only link up can wake up */
if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
- if (netif_msg_link(adapter))
- dev_err(&pdev->dev,
- "%s: read write phy "
- "register failed.\n",
- atl1c_driver_name);
- goto wol_dis;
+ dev_dbg(&pdev->dev, "%s: read write phy "
+ "register failed.\n",
+ atl1c_driver_name);
}
}
/* clear phy interrupt */
atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
/* Config MAC Ctrl register */
- mac_ctrl_data = MAC_CTRL_RX_EN;
- /* set to 10/100M halt duplex */
- mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
- mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
- MAC_CTRL_PRMLEN_MASK) <<
- MAC_CTRL_PRMLEN_SHIFT);
-
if (adapter->vlgrp)
mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
@@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
- if (netif_msg_hw(adapter))
- dev_dbg(&pdev->dev,
- "%s: suspend MAC=0x%x\n",
- atl1c_driver_name, mac_ctrl_data);
+ dev_dbg(&pdev->dev,
+ "%s: suspend MAC=0x%x\n",
+ atl1c_driver_name, mac_ctrl_data);
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
/* pcie patch */
- AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
- ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
- AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+ device_set_wakeup_enable(&pdev->dev, 1);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto suspend_exit;
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
+ GPHY_CTRL_EXT_RESET);
+ pci_prepare_to_sleep(pdev);
+ } else {
+ AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
+ master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
+ mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
+ mac_ctrl_data |= MAC_CTRL_DUPLX;
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+ hw->phy_configured = false; /* re-init PHY when resume */
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
}
-wol_dis:
-
- /* WOL disabled */
- AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
-
- /* pcie patch */
- AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
- ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
- AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
-
- atl1c_phy_disable(hw);
- hw->phy_configured = false; /* re-init PHY when resume */
-
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-suspend_exit:
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3cold, 0);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
+ atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
+ ATL1C_PCIE_PHY_RESET);
atl1c_phy_reset(&adapter->hw);
atl1c_reset_mac(&adapter->hw);
+ atl1c_phy_init(&adapter->hw);
+
+#if 0
+ AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
+ pm_data &= ~PM_CTRLSTAT_PME_EN;
+ AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
+#endif
+
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
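[note] The suspend rework above drops the inline renegotiation/wait loop (now handled by atl1c_phy_power_saving()), programs the WOL and MAC control registers from the cached link state, and hands the sleep transition to the PCI core. A sketch of the overall shape, with all MAC/PHY register programming elided and demo_wol standing in for the driver's WOL flag:

    #include <linux/pci.h>
    #include <linux/pm_wakeup.h>

    static bool demo_wol;   /* placeholder for the driver's WOL setting */

    static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            int err = pci_save_state(pdev);

            if (err)
                    return err;

            if (demo_wol) {
                    /* program MAC/WOL registers here */
                    device_set_wakeup_enable(&pdev->dev, 1);
                    pci_prepare_to_sleep(pdev);  /* picks D-state, arms PME */
            } else {
                    pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
            }

            pci_disable_device(pdev);
            pci_set_power_state(pdev, pci_choose_state(pdev, state));
            return 0;
    }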
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 146372fd6683..9c0ddb273ac8 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -436,8 +436,8 @@ struct rx_free_desc {
__le16 buf_len; /* Size of the receive buffer in host memory */
u16 coalese; /* Update consumer index to host after the
* reception of this frame */
- /* __attribute__ ((packed)) is required */
-} __attribute__ ((packed));
+ /* __packed is required */
+} __packed;
/*
* The L1 transmit packet descriptor is comprised of four 32-bit words.
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 293f9c16e786..3d52538df6c4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -150,9 +150,8 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
unsigned long offset,
enum dma_data_direction dir)
{
- ssb_dma_sync_single_range_for_device(sdev, dma_base,
- offset & dma_desc_align_mask,
- dma_desc_sync_size, dir);
+ dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+ dma_desc_sync_size, dir);
}
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
@@ -160,9 +159,8 @@ static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
unsigned long offset,
enum dma_data_direction dir)
{
- ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
- offset & dma_desc_align_mask,
- dma_desc_sync_size, dir);
+ dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+ dma_desc_sync_size, dir);
}
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +606,10 @@ static void b44_tx(struct b44 *bp)
BUG_ON(skb == NULL);
- ssb_dma_unmap_single(bp->sdev,
- rp->mapping,
- skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev,
+ rp->mapping,
+ skb->len,
+ DMA_TO_DEVICE);
rp->skb = NULL;
dev_kfree_skb_irq(skb);
}
@@ -648,29 +646,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
if (skb == NULL)
return -ENOMEM;
- mapping = ssb_dma_map_single(bp->sdev, skb->data,
- RX_PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
/* Hardware bug work-around, the chip is unable to do PCI DMA
to/from anything above 1GB :-( */
- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
/* Sigh... */
- if (!ssb_dma_mapping_error(bp->sdev, mapping))
- ssb_dma_unmap_single(bp->sdev, mapping,
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
if (skb == NULL)
return -ENOMEM;
- mapping = ssb_dma_map_single(bp->sdev, skb->data,
- RX_PKT_BUF_SZ,
- DMA_FROM_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, mapping) ||
- mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
- if (!ssb_dma_mapping_error(bp->sdev, mapping))
- ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return -ENOMEM;
}
@@ -745,9 +743,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dest_idx * sizeof(*dest_desc),
DMA_BIDIRECTIONAL);
- ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
- RX_PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
}
static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +765,9 @@ static int b44_rx(struct b44 *bp, int budget)
struct rx_header *rh;
u16 len;
- ssb_dma_sync_single_for_cpu(bp->sdev, map,
- RX_PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
rh = (struct rx_header *) skb->data;
len = le16_to_cpu(rh->len);
if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +799,8 @@ static int b44_rx(struct b44 *bp, int budget)
skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
if (skb_size < 0)
goto drop_it;
- ssb_dma_unmap_single(bp->sdev, map,
- skb_size, DMA_FROM_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev, map,
+ skb_size, DMA_FROM_DEVICE);
/* Leave out rx_header */
skb_put(skb, len + RX_PKT_OFFSET);
skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +952,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_out;
}
- mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
struct sk_buff *bounce_skb;
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
- if (!ssb_dma_mapping_error(bp->sdev, mapping))
- ssb_dma_unmap_single(bp->sdev, mapping, len,
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
- mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
- len, DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
- if (!ssb_dma_mapping_error(bp->sdev, mapping))
- ssb_dma_unmap_single(bp->sdev, mapping,
+ mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+ if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+ dma_unmap_single(bp->sdev->dma_dev, mapping,
len, DMA_TO_DEVICE);
dev_kfree_skb_any(bounce_skb);
goto err_out;
@@ -1068,8 +1066,8 @@ static void b44_free_rings(struct b44 *bp)
if (rp->skb == NULL)
continue;
- ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rp->skb);
rp->skb = NULL;
}
@@ -1080,8 +1078,8 @@ static void b44_free_rings(struct b44 *bp)
if (rp->skb == NULL)
continue;
- ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(rp->skb);
rp->skb = NULL;
}
@@ -1103,14 +1101,12 @@ static void b44_init_rings(struct b44 *bp)
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
if (bp->flags & B44_FLAG_RX_RING_HACK)
- ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
- DMA_TABLE_BYTES,
- DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
if (bp->flags & B44_FLAG_TX_RING_HACK)
- ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
- DMA_TABLE_BYTES,
- DMA_TO_DEVICE);
+ dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES, DMA_TO_DEVICE);
for (i = 0; i < bp->rx_pending; i++) {
if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1126,23 @@ static void b44_free_consistent(struct b44 *bp)
bp->tx_buffers = NULL;
if (bp->rx_ring) {
if (bp->flags & B44_FLAG_RX_RING_HACK) {
- ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
- DMA_TABLE_BYTES,
- DMA_BIDIRECTIONAL);
+ dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
kfree(bp->rx_ring);
} else
- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
- bp->rx_ring, bp->rx_ring_dma,
- GFP_KERNEL);
+ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
bp->flags &= ~B44_FLAG_RX_RING_HACK;
}
if (bp->tx_ring) {
if (bp->flags & B44_FLAG_TX_RING_HACK) {
- ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
- DMA_TABLE_BYTES,
- DMA_TO_DEVICE);
+ dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES, DMA_TO_DEVICE);
kfree(bp->tx_ring);
} else
- ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
- bp->tx_ring, bp->tx_ring_dma,
- GFP_KERNEL);
+ dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
@@ -1175,7 +1167,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
goto out_err;
size = DMA_TABLE_BYTES;
- bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+ bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+ &bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
/* Allocation may have failed due to pci_alloc_consistent
insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1180,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
if (!rx_ring)
goto out_err;
- rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
- DMA_TABLE_BYTES,
- DMA_BIDIRECTIONAL);
+ rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
- if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+ if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
rx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(rx_ring);
goto out_err;
@@ -1202,7 +1195,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
bp->flags |= B44_FLAG_RX_RING_HACK;
}
- bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+ bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+ &bp->tx_ring_dma, gfp);
if (!bp->tx_ring) {
/* Allocation may have failed due to ssb_dma_alloc_consistent
insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1208,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
if (!tx_ring)
goto out_err;
- tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
- DMA_TABLE_BYTES,
- DMA_TO_DEVICE);
+ tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+ if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
tx_ring_dma + size > DMA_BIT_MASK(30)) {
kfree(tx_ring);
goto out_err;
@@ -2176,12 +2170,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
"Failed to powerup the bus\n");
goto err_out_free_dev;
}
- err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
- if (err) {
+
+ if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+ dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
}
+
err = b44_get_invariants(bp);
if (err) {
dev_err(sdev->dev,
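
[Editor's note] The b44 hunks above replace the ssb_dma_* wrappers with the generic DMA API on sdev->dma_dev while keeping the driver's 30-bit addressing workaround. As a rough, hedged illustration of that pattern only (not the driver's code; the helper name, DMA_TO_DEVICE direction and kmalloc bounce buffer are invented for the sketch):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Minimal sketch: map 'buf' for the device, falling back to a GFP_DMA
 * bounce buffer when the mapping fails or lands above the 30-bit window.
 * The caller still has to copy the payload into the bounce buffer and
 * free it after the unmap, much as b44 does with its bounce skbs.
 */
static void *map_low_or_bounce(struct device *dev, void *buf, size_t len,
			       dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, *mapping) &&
	    *mapping + len <= DMA_BIT_MASK(30))
		return buf;			/* direct mapping is usable */

	if (!dma_mapping_error(dev, *mapping))
		dma_unmap_single(dev, *mapping, len, DMA_TO_DEVICE);

	buf = kmalloc(len, GFP_ATOMIC | GFP_DMA);	/* bounce buffer */
	if (!buf)
		return NULL;

	*mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}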
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index b9ad799c719f..ee1ad9693c8f 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -186,7 +186,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
- int cnt = 0, wait = 5;
+ int msecs = 0;
u32 ready;
do {
@@ -201,15 +201,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (ready)
break;
- if (cnt > 4000000) {
+ if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
return -1;
}
- if (cnt > 50)
- wait = 200;
- cnt += wait;
- udelay(wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(1));
+ msecs++;
} while (true);
return 0;
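
[Editor's note] The be_mbox_db_ready_wait() change above drops the udelay() busy-wait in favour of sleeping roughly one millisecond per iteration and giving up after 4000 ms. A hedged sketch of that pattern, where the register accessor and ready-bit mask are placeholders and msleep() stands in for the driver's set_current_state()/schedule_timeout() pair:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a ready bit about once per millisecond, giving up after ~4 s. */
static int poll_ready_bit(void __iomem *reg, u32 ready_mask)
{
	int msecs = 0;

	while (!(readl(reg) & ready_mask)) {
		if (msecs++ > 4000)
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}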
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 063026de4957..06839676e3c4 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -192,7 +192,7 @@ struct amap_eth_hdr_wrb {
u8 event;
u8 crc;
u8 forward;
- u8 ipsec;
+ u8 lso6;
u8 mgmt;
u8 ipcs;
u8 udpcs;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 54b14272f333..01eb447f98b6 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -373,10 +373,12 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
hdr, skb_shinfo(skb)->gso_size);
+ if (skb_is_gso_v6(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -1735,6 +1737,44 @@ done:
adapter->isr_registered = false;
}
+static int be_close(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *rx_eq = &adapter->rx_eq;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ int vec;
+
+ cancel_delayed_work_sync(&adapter->work);
+
+ be_async_mcc_disable(adapter);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ adapter->link_up = false;
+
+ be_intr_set(adapter, false);
+
+ if (adapter->msix_enabled) {
+ vec = be_msix_vec_get(adapter, tx_eq->q.id);
+ synchronize_irq(vec);
+ vec = be_msix_vec_get(adapter, rx_eq->q.id);
+ synchronize_irq(vec);
+ } else {
+ synchronize_irq(netdev->irq);
+ }
+ be_irq_unregister(adapter);
+
+ napi_disable(&rx_eq->napi);
+ napi_disable(&tx_eq->napi);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ be_tx_compl_clean(adapter);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1765,27 +1805,29 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
- goto ret_sts;
+ goto err;
be_link_status_update(adapter, link_up);
- if (be_physfn(adapter))
+ if (be_physfn(adapter)) {
status = be_vid_config(adapter);
- if (status)
- goto ret_sts;
+ if (status)
+ goto err;
- if (be_physfn(adapter)) {
status = be_cmd_set_flow_control(adapter,
adapter->tx_fc, adapter->rx_fc);
if (status)
- goto ret_sts;
+ goto err;
}
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-ret_sts:
- return status;
+ return 0;
+err:
+ be_close(adapter->netdev);
+ return -EIO;
}
static int be_setup_wol(struct be_adapter *adapter, bool enable)
@@ -1913,43 +1955,6 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_close(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- int vec;
-
- cancel_delayed_work_sync(&adapter->work);
-
- be_async_mcc_disable(adapter);
-
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- adapter->link_up = false;
-
- be_intr_set(adapter, false);
-
- if (adapter->msix_enabled) {
- vec = be_msix_vec_get(adapter, tx_eq->q.id);
- synchronize_irq(vec);
- vec = be_msix_vec_get(adapter, rx_eq->q.id);
- synchronize_irq(vec);
- } else {
- synchronize_irq(netdev->irq);
- }
- be_irq_unregister(adapter);
-
- napi_disable(&rx_eq->napi);
- napi_disable(&tx_eq->napi);
-
- /* Wait for all pending tx completions to arrive so that
- * all tx skbs are freed.
- */
- be_tx_compl_clean(adapter);
-
- return 0;
-}
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
@@ -2183,7 +2188,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
- NETIF_F_GRO;
+ NETIF_F_GRO | NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 368f33313fb6..012613fde3f4 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
# define bfin_tx_hwtstamp(dev, skb)
#endif
-static void adjust_tx_list(void)
+static inline void _tx_reclaim_skb(void)
+{
+ do {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+
+ } while (tx_list_head->status.status_word != 0);
+}
+
+static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
- if (tx_list_head->status.status_word != 0 &&
- current_tx_ptr != tx_list_head) {
- goto adjust_head; /* released something, just return; */
- }
+ if (tx_list_head->status.status_word != 0)
+ _tx_reclaim_skb();
- /*
- * if nothing released, check wait condition
- * current's next can not be the head,
- * otherwise the dma will not stop as we want
- */
- if (current_tx_ptr->next->next == tx_list_head) {
+ if (current_tx_ptr->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
+			/* slow down polling to avoid too many queue stops. */
udelay(10);
- if (tx_list_head->status.status_word != 0 ||
- !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
- goto adjust_head;
- }
- if (timeout_cnt-- < 0) {
- printk(KERN_ERR DRV_NAME
- ": wait for adjust tx list head timeout\n");
+ /* reclaim skb if DMA is not running. */
+ if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
+ break;
+ if (timeout_cnt-- < 0)
break;
- }
- }
- if (tx_list_head->status.status_word != 0) {
- goto adjust_head;
}
+
+ if (timeout_cnt >= 0)
+ _tx_reclaim_skb();
+ else
+ netif_stop_queue(lp->ndev);
}
- return;
+ if (current_tx_ptr->next != tx_list_head &&
+ netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
+
+ if (tx_list_head != current_tx_ptr) {
+ /* shorten the timer interval if tx queue is stopped */
+ if (netif_queue_stopped(lp->ndev))
+ lp->tx_reclaim_timer.expires =
+ jiffies + (TX_RECLAIM_JIFFIES >> 4);
+ else
+ lp->tx_reclaim_timer.expires =
+ jiffies + TX_RECLAIM_JIFFIES;
+
+ mod_timer(&lp->tx_reclaim_timer,
+ lp->tx_reclaim_timer.expires);
+ }
-adjust_head:
- do {
- tx_list_head->desc_a.config &= ~DMAEN;
- tx_list_head->status.status_word = 0;
- if (tx_list_head->skb) {
- dev_kfree_skb(tx_list_head->skb);
- tx_list_head->skb = NULL;
- } else {
- printk(KERN_ERR DRV_NAME
- ": no sk_buff in a transmitted frame!\n");
- }
- tx_list_head = tx_list_head->next;
- } while (tx_list_head->status.status_word != 0 &&
- current_tx_ptr != tx_list_head);
return;
+}
+static void tx_reclaim_skb_timeout(unsigned long lp)
+{
+ tx_reclaim_skb((struct bfin_mac_local *)lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
skb->len);
current_tx_ptr->desc_a.start_addr =
(u32)current_tx_ptr->packet;
- if (current_tx_ptr->status.status_word != 0)
- current_tx_ptr->status.status_word = 0;
blackfin_dcache_flush_range(
(u32)current_tx_ptr->packet,
(u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
*/
SSYNC();
+	/* always clear status buffer before starting tx dma */
+ current_tx_ptr->status.status_word = 0;
+
/* enable this packet's dma */
current_tx_ptr->desc_a.config |= DMAEN;
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
out:
- adjust_tx_list();
-
bfin_tx_hwtstamp(dev, skb);
current_tx_ptr = current_tx_ptr->next;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
+
+ tx_reclaim_skb(lp);
+
return NETDEV_TX_OK;
}
@@ -1167,8 +1181,11 @@ real_rx:
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bfin_mac_poll(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
disable_irq(IRQ_MAC_RX);
bfin_mac_interrupt(IRQ_MAC_RX, dev);
+ tx_reclaim_skb(lp);
enable_irq(IRQ_MAC_RX);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
/* Our watchdog timed out. Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
pr_debug("%s: %s\n", dev->name, __func__);
bfin_mac_disable();
- /* reset tx queue */
- tx_list_tail = tx_list_head->next;
+ del_timer(&lp->tx_reclaim_timer);
+
+ /* reset tx queue and free skb */
+ while (tx_list_head != current_tx_ptr) {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ }
+ tx_list_head = tx_list_head->next;
+ }
+
+ if (netif_queue_stopped(lp->ndev))
+ netif_wake_queue(lp->ndev);
bfin_mac_enable();
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
platform_set_drvdata(pdev, ndev);
lp = netdev_priv(ndev);
+ lp->ndev = ndev;
/* Grab the MAC address in the MAC */
*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
+ init_timer(&lp->tx_reclaim_timer);
+ lp->tx_reclaim_timer.data = (unsigned long)lp;
+ lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+
spin_lock_init(&lp->lock);
/* now, enable interrupts */
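
[Editor's note] The bfin_mac hunks above move TX skb reclaim out of the hot xmit path into tx_reclaim_skb(), re-armed from a timer whose interval shrinks while the queue is stopped. A hedged sketch of just that re-arming decision (the struct and field names come from the hunks above, but this helper itself is invented, not the driver's function):

/* Re-arm the reclaim timer: poll ~16x faster while the TX queue is stopped
 * so a stalled queue gets woken promptly, otherwise poll at the normal rate.
 */
static void rearm_tx_reclaim_timer(struct bfin_mac_local *lp)
{
	unsigned long delay = netif_queue_stopped(lp->ndev) ?
			      (TX_RECLAIM_JIFFIES >> 4) : TX_RECLAIM_JIFFIES;

	mod_timer(&lp->tx_reclaim_timer, jiffies + delay);
}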
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 1ae7b82ceeee..04e4050df18b 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -13,9 +13,12 @@
#include <linux/net_tstamp.h>
#include <linux/clocksource.h>
#include <linux/timecompare.h>
+#include <linux/timer.h>
#define BFIN_MAC_CSUM_OFFLOAD
+#define TX_RECLAIM_JIFFIES (HZ / 5)
+
struct dma_descriptor {
struct dma_descriptor *next_dma_desc;
unsigned long start_addr;
@@ -68,6 +71,8 @@ struct bfin_mac_local {
int wol; /* Wake On Lan */
int irq_wake_requested;
+ struct timer_list tx_reclaim_timer;
+ struct net_device *ndev;
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 117432222a09..a5dd81face37 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1446,7 +1446,8 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
- u32 bmcr;
+ u32 uninitialized_var(bmcr);
+ int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
return;
@@ -1456,22 +1457,28 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_SERDES_DIG);
- bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
- val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
- val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
- bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+ val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+ val |= MII_BNX2_SD_MISC1_FORCE |
+ MII_BNX2_SD_MISC1_FORCE_2_5G;
+ bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ }
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
- bmcr |= BCM5708S_BMCR_FORCE_2500;
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (!err)
+ bmcr |= BCM5708S_BMCR_FORCE_2500;
} else {
return;
}
+ if (err)
+ return;
+
if (bp->autoneg & AUTONEG_SPEED) {
bmcr &= ~BMCR_ANENABLE;
if (bp->req_duplex == DUPLEX_FULL)
@@ -1483,7 +1490,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
- u32 bmcr;
+ u32 uninitialized_var(bmcr);
+ int err;
if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
return;
@@ -1493,21 +1501,26 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_SERDES_DIG);
- bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
- val &= ~MII_BNX2_SD_MISC1_FORCE;
- bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+ val &= ~MII_BNX2_SD_MISC1_FORCE;
+ bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+ }
bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
- bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
- bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+ err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+ if (!err)
+ bmcr &= ~BCM5708S_BMCR_FORCE_2500;
} else {
return;
}
+ if (err)
+ return;
+
if (bp->autoneg & AUTONEG_SPEED)
bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff70be898765..0383e3066313 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -4266,14 +4266,16 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
MDIO_PMA_REG_10G_CTRL2, 0x0008);
}
- /* Set 2-wire transfer rate to 400Khz since 100Khz
- is not operational */
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+		 * to 100kHz since some DACs (direct attached cables) do
+		 * not work at 400kHz.
+ */
bnx2x_cl45_write(bp, params->port,
ext_phy_type,
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa101);
+ 0xa001);
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 40fdc41446cc..25c14c6236f5 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond)
_unlock_tx_hashtbl(bond);
}
+static long long compute_gap(struct slave *slave)
+{
+ return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
+ (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+}
+
/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
- s64 max_gap;
- int i, found = 0;
-
- /* Find the first enabled slave */
- bond_for_each_slave(bond, slave, i) {
- if (SLAVE_IS_OK(slave)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- return NULL;
- }
+ long long max_gap;
+ int i;
- least_loaded = slave;
- max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */
- (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
+ least_loaded = NULL;
+ max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave_from(bond, slave, i, least_loaded) {
+ bond_for_each_slave(bond, slave, i) {
if (SLAVE_IS_OK(slave)) {
- s64 gap = (s64)(slave->speed << 20) -
- (s64)(SLAVE_TLB_INFO(slave).load << 3);
+ long long gap = compute_gap(slave);
+
if (max_gap < gap) {
least_loaded = slave;
max_gap = gap;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5e12462a9d5e..a95a41b74b4e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
#define BOND_LINK_ARP_INTERV 0
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
+static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_grat_arp = 1;
static int num_unsol_na = 1;
static int miimon = BOND_LINK_MON_INTERV;
@@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *fail_over_mac;
+static int all_slaves_active = 0;
static struct bond_params bonding_defaults;
module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
+module_param(tx_queues, int, 0);
+MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param(num_grat_arp, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
module_param(num_unsol_na, int, 0644);
@@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
+module_param(all_slaves_active, int, 0);
+MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
+ "by setting active flag for all slaves. "
+ "0 for never (default), 1 for always.");
/*----------------------------- Global variables ----------------------------*/
@@ -1522,16 +1530,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
+ /* If this is the first slave, then we need to set the master's hardware
+ * address to be the same as the slave's. */
+ if (bond->slave_cnt == 0)
+ memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
+ slave_dev->addr_len);
+
+
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
- /* save slave's original flags before calling
- * netdev_set_master and dev_open
+ /*
+ * Set the new_slave's queue_id to be zero. Queue ID mapping
+ * is set via sysfs or module option if desired.
*/
- new_slave->original_flags = slave_dev->flags;
+ new_slave->queue_id = 0;
+
+ /* Save slave's original mtu and then set it to match the bond */
+ new_slave->original_mtu = slave_dev->mtu;
+ res = dev_set_mtu(slave_dev, bond->dev->mtu);
+ if (res) {
+ pr_debug("Error %d calling dev_set_mtu\n", res);
+ goto err_free;
+ }
/*
* Save slave's original ("permanent") mac address for modes
@@ -1550,7 +1574,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = dev_set_mac_address(slave_dev, &addr);
if (res) {
pr_debug("Error %d calling set_mac_address\n", res);
- goto err_free;
+ goto err_restore_mtu;
}
}
@@ -1785,6 +1809,9 @@ err_restore_mac:
dev_set_mac_address(slave_dev, &addr);
}
+err_restore_mtu:
+ dev_set_mtu(slave_dev, new_slave->original_mtu);
+
err_free:
kfree(new_slave);
@@ -1969,6 +1996,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
dev_set_mac_address(slave_dev, &addr);
}
+ dev_set_mtu(slave_dev, slave->original_mtu);
+
slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
IFF_SLAVE_INACTIVE | IFF_BONDING |
IFF_SLAVE_NEEDARP);
@@ -2555,7 +2584,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
/*
* This target is not on a VLAN
*/
- if (rt->u.dst.dev == bond->dev) {
+ if (rt->dst.dev == bond->dev) {
ip_rt_put(rt);
pr_debug("basa: rtdev == bond->dev: arp_send\n");
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
@@ -2566,7 +2595,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
vlan_id = 0;
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
- if (vlan_dev == rt->u.dst.dev) {
+ if (vlan_dev == rt->dst.dev) {
vlan_id = vlan->vlan_id;
pr_debug("basa: vlan match on %s %d\n",
vlan_dev->name, vlan_id);
@@ -2584,7 +2613,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
if (net_ratelimit()) {
pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &fl.fl4_dst,
- rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+ rt->dst.dev ? rt->dst.dev->name : "NULL");
}
ip_rt_put(rt);
}
@@ -3265,6 +3294,7 @@ static void bond_info_show_slave(struct seq_file *seq,
else
seq_puts(seq, "Aggregator ID: N/A\n");
}
+ seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
}
static int bond_info_seq_show(struct seq_file *seq, void *v)
@@ -3774,20 +3804,21 @@ static int bond_close(struct net_device *bond_dev)
return 0;
}
-static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
+static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device_stats *stats = &bond_dev->stats;
- struct net_device_stats local_stats;
+ struct rtnl_link_stats64 *stats = &bond_dev->stats64;
+ struct rtnl_link_stats64 local_stats;
struct slave *slave;
int i;
- memset(&local_stats, 0, sizeof(struct net_device_stats));
+ memset(&local_stats, 0, sizeof(local_stats));
read_lock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
- const struct net_device_stats *sstats = dev_get_stats(slave->dev);
+ const struct rtnl_link_stats64 *sstats =
+ dev_get_stats(slave->dev);
local_stats.rx_packets += sstats->rx_packets;
local_stats.rx_bytes += sstats->rx_bytes;
@@ -4401,9 +4432,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
}
}
+/*
+ * Lookup the slave that corresponds to a qid
+ */
+static inline int bond_slave_override(struct bonding *bond,
+ struct sk_buff *skb)
+{
+ int i, res = 1;
+ struct slave *slave = NULL;
+ struct slave *check_slave;
+
+ read_lock(&bond->lock);
+
+ if (!BOND_IS_OK(bond) || !skb->queue_mapping)
+ goto out;
+
+ /* Find out if any slaves have the same mapping as this skb. */
+ bond_for_each_slave(bond, check_slave, i) {
+ if (check_slave->queue_id == skb->queue_mapping) {
+ slave = check_slave;
+ break;
+ }
+ }
+
+ /* If the slave isn't UP, use default transmit policy. */
+ if (slave && slave->queue_id && IS_UP(slave->dev) &&
+ (slave->link == BOND_LINK_UP)) {
+ res = bond_dev_queue_xmit(bond, skb, slave->dev);
+ }
+
+out:
+ read_unlock(&bond->lock);
+ return res;
+}
+
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /*
+ * This helper function exists to help dev_pick_tx get the correct
+	 * destination queue.  Using a helper function skips a call to
+ * skb_tx_hash and will put the skbs in the queue we expect on their
+ * way down to the bonding driver.
+ */
+ return skb->queue_mapping;
+}
+
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- const struct bonding *bond = netdev_priv(dev);
+ struct bonding *bond = netdev_priv(dev);
+
+ if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
+ if (!bond_slave_override(bond, skb))
+ return NETDEV_TX_OK;
+ }
switch (bond->params.mode) {
case BOND_MODE_ROUNDROBIN:
@@ -4488,7 +4569,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_open = bond_open,
.ndo_stop = bond_close,
.ndo_start_xmit = bond_start_xmit,
- .ndo_get_stats = bond_get_stats,
+ .ndo_select_queue = bond_select_queue,
+ .ndo_get_stats64 = bond_get_stats,
.ndo_do_ioctl = bond_do_ioctl,
.ndo_set_multicast_list = bond_set_multicast_list,
.ndo_change_mtu = bond_change_mtu,
@@ -4756,6 +4838,20 @@ static int bond_check_params(struct bond_params *params)
}
}
+ if (tx_queues < 1 || tx_queues > 255) {
+ pr_warning("Warning: tx_queues (%d) should be between "
+ "1 and 255, resetting to %d\n",
+ tx_queues, BOND_DEFAULT_TX_QUEUES);
+ tx_queues = BOND_DEFAULT_TX_QUEUES;
+ }
+
+ if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
+ pr_warning("Warning: all_slaves_active module parameter (%d), "
+ "not of valid value (0/1), so it was set to "
+ "0\n", all_slaves_active);
+ all_slaves_active = 0;
+ }
+
/* reset values for TLB/ALB */
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
@@ -4926,6 +5022,8 @@ static int bond_check_params(struct bond_params *params)
params->primary[0] = 0;
params->primary_reselect = primary_reselect_value;
params->fail_over_mac = fail_over_mac_value;
+ params->tx_queues = tx_queues;
+ params->all_slaves_active = all_slaves_active;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
@@ -5012,8 +5110,8 @@ int bond_create(struct net *net, const char *name)
rtnl_lock();
- bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
- bond_setup);
+ bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
+ bond_setup, tx_queues);
if (!bond_dev) {
pr_err("%s: eek! can't alloc netdev!\n", name);
rtnl_unlock();
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b8bec086daa1..f9a034361a8e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d,
/*
* Set the slaves in the current bond. The bond interface must be
* up for this to succeed.
- * This function is largely the same flow as bonding_update_bonds().
+ * This is supposed to be only a thin wrapper for bond_enslave and bond_release.
+ * All hard work should be done there.
*/
static ssize_t bonding_store_slaves(struct device *d,
struct device_attribute *attr,
@@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d,
{
char command[IFNAMSIZ + 1] = { 0, };
char *ifname;
- int i, res, found, ret = count;
- u32 original_mtu;
- struct slave *slave;
- struct net_device *dev = NULL;
+ int res, ret = count;
+ struct net_device *dev;
struct bonding *bond = to_bond(d);
/* Quick sanity check -- is the bond interface up? */
@@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d,
bond->dev->name);
}
- /* Note: We can't hold bond->lock here, as bond_create grabs it. */
-
if (!rtnl_trylock())
return restart_syscall();
@@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d,
!dev_valid_name(ifname))
goto err_no_cmd;
- if (command[0] == '+') {
-
- /* Got a slave name in ifname. Is it already in the list? */
- found = 0;
-
- dev = __dev_get_by_name(dev_net(bond->dev), ifname);
- if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
- bond->dev->name, ifname);
- ret = -ENODEV;
- goto out;
- }
-
- if (dev->flags & IFF_UP) {
- pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
- bond->dev->name, dev->name);
- ret = -EPERM;
- goto out;
- }
-
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i)
- if (slave->dev == dev) {
- pr_err("%s: Interface %s is already enslaved!\n",
- bond->dev->name, ifname);
- ret = -EPERM;
- read_unlock(&bond->lock);
- goto out;
- }
- read_unlock(&bond->lock);
-
- pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
-
- /* If this is the first slave, then we need to set
- the master's hardware address to be the same as the
- slave's. */
- if (is_zero_ether_addr(bond->dev->dev_addr))
- memcpy(bond->dev->dev_addr, dev->dev_addr,
- dev->addr_len);
-
- /* Set the slave's MTU to match the bond */
- original_mtu = dev->mtu;
- res = dev_set_mtu(dev, bond->dev->mtu);
- if (res) {
- ret = res;
- goto out;
- }
+ dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+ if (!dev) {
+ pr_info("%s: Interface %s does not exist!\n",
+ bond->dev->name, ifname);
+ ret = -ENODEV;
+ goto out;
+ }
+ switch (command[0]) {
+ case '+':
+ pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
res = bond_enslave(bond->dev, dev);
- bond_for_each_slave(bond, slave, i)
- if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
- slave->original_mtu = original_mtu;
- if (res)
- ret = res;
+ break;
- goto out;
- }
+ case '-':
+ pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ res = bond_release(bond->dev, dev);
+ break;
- if (command[0] == '-') {
- dev = NULL;
- original_mtu = 0;
- bond_for_each_slave(bond, slave, i)
- if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
- dev = slave->dev;
- original_mtu = slave->original_mtu;
- break;
- }
- if (dev) {
- pr_info("%s: Removing slave %s\n",
- bond->dev->name, dev->name);
- res = bond_release(bond->dev, dev);
- if (res) {
- ret = res;
- goto out;
- }
- /* set the slave MTU to the default */
- dev_set_mtu(dev, original_mtu);
- } else {
- pr_err("unable to remove non-existent slave %s for bond %s.\n",
- ifname, bond->dev->name);
- ret = -ENODEV;
- }
- goto out;
+ default:
+ goto err_no_cmd;
}
+ if (res)
+ ret = res;
+ goto out;
+
err_no_cmd:
pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
bond->dev->name);
@@ -1472,7 +1411,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
}
static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
+/*
+ * Show the queue_ids of the slaves in the current bond.
+ */
+static ssize_t bonding_show_queue_id(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct slave *slave;
+ int i, res = 0;
+ struct bonding *bond = to_bond(d);
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave, i) {
+ if (res > (PAGE_SIZE - 6)) {
+ /* not enough space for another interface name */
+ if ((PAGE_SIZE - res) > 10)
+ res = PAGE_SIZE - 10;
+ res += sprintf(buf + res, "++more++ ");
+ break;
+ }
+ res += sprintf(buf + res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
+ }
+ read_unlock(&bond->lock);
+ if (res)
+ buf[res-1] = '\n'; /* eat the leftover space */
+ rtnl_unlock();
+ return res;
+}
+
+/*
+ * Set the queue_ids of the slaves in the current bond. The bond
+ * interface must be enslaved for this to work.
+ */
+static ssize_t bonding_store_queue_id(struct device *d,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct slave *slave, *update_slave;
+ struct bonding *bond = to_bond(d);
+ u16 qid;
+ int i, ret = count;
+ char *delim;
+ struct net_device *sdev = NULL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ /* delim will point to queue id if successful */
+ delim = strchr(buffer, ':');
+ if (!delim)
+ goto err_no_cmd;
+
+ /*
+ * Terminate string that points to device name and bump it
+ * up one, so we can read the queue id there.
+ */
+ *delim = '\0';
+ if (sscanf(++delim, "%hd\n", &qid) != 1)
+ goto err_no_cmd;
+
+ /* Check buffer length, valid ifname and queue id */
+ if (strlen(buffer) > IFNAMSIZ ||
+ !dev_valid_name(buffer) ||
+ qid > bond->params.tx_queues)
+ goto err_no_cmd;
+
+ /* Get the pointer to that interface if it exists */
+ sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
+ if (!sdev)
+ goto err_no_cmd;
+
+ read_lock(&bond->lock);
+
+	/* Search for the slave and check for duplicate qids */
+ update_slave = NULL;
+ bond_for_each_slave(bond, slave, i) {
+ if (sdev == slave->dev)
+ /*
+ * We don't need to check the matching
+ * slave for dups, since we're overwriting it
+ */
+ update_slave = slave;
+ else if (qid && qid == slave->queue_id) {
+ goto err_no_cmd_unlock;
+ }
+ }
+
+ if (!update_slave)
+ goto err_no_cmd_unlock;
+
+ /* Actually set the qids for the slave */
+ update_slave->queue_id = qid;
+
+ read_unlock(&bond->lock);
+out:
+ rtnl_unlock();
+ return ret;
+
+err_no_cmd_unlock:
+ read_unlock(&bond->lock);
+err_no_cmd:
+ pr_info("invalid input for queue_id set for %s.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
+}
+
+static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
+ bonding_store_queue_id);
+
+
+/*
+ * Show and set the all_slaves_active flag.
+ */
+static ssize_t bonding_show_slaves_active(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+}
+
+static ssize_t bonding_store_slaves_active(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i, new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+ struct slave *slave;
+
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no all_slaves_active value specified.\n",
+ bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (new_value == bond->params.all_slaves_active)
+ goto out;
+
+ if ((new_value == 0) || (new_value == 1)) {
+ bond->params.all_slaves_active = new_value;
+ } else {
+ pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
+ bond->dev->name, new_value);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->state == BOND_STATE_BACKUP) {
+ if (new_value)
+ slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
+ else
+ slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+ }
+ }
+out:
+	return ret;
+}
+static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
+ bonding_show_slaves_active, bonding_store_slaves_active);
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
@@ -1499,6 +1604,8 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_actor_key.attr,
&dev_attr_ad_partner_key.attr,
&dev_attr_ad_partner_mac.attr,
+ &dev_attr_queue_id.attr,
+ &dev_attr_all_slaves_active.attr,
NULL,
};
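
[Editor's note] The queue_id attribute added above accepts writes of the form "<slave-ifname>:<qid>", with the qid bounded by tx_queues and a qid of 0 effectively clearing the override (bond_slave_override() only steers traffic for a non-zero slave->queue_id). A hedged sketch of the parsing it performs; the buffer contents, helper name and error handling here are invented examples, not the sysfs code itself:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Parse "eth1:2" into an ifname and a queue id, as the sysfs store does. */
static int parse_queue_id(char *buf, char **ifname, u16 *qid)
{
	char *delim = strchr(buf, ':');

	if (!delim)
		return -EINVAL;
	*delim = '\0';			/* buf now holds just the ifname */
	if (sscanf(delim + 1, "%hu", qid) != 1)
		return -EINVAL;
	*ifname = buf;
	return 0;
}

Writing, for example, "eth1:2" to the bond's queue_id attribute then lets skbs whose queue_mapping is 2 be transmitted on eth1 via bond_slave_override(), while other traffic follows the normal mode policy.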
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2aa336720591..c6fdd851579a 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,8 @@
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.6.0"
-#define DRV_RELDATE "September 26, 2009"
+#define DRV_VERSION "3.7.0"
+#define DRV_RELDATE "June 2, 2010"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
@@ -60,6 +60,9 @@
((mode) == BOND_MODE_TLB) || \
((mode) == BOND_MODE_ALB))
+#define TX_QUEUE_OVERRIDE(mode) \
+ (((mode) == BOND_MODE_ACTIVEBACKUP) || \
+ ((mode) == BOND_MODE_ROUNDROBIN))
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -131,6 +134,8 @@ struct bond_params {
char primary[IFNAMSIZ];
int primary_reselect;
__be32 arp_targets[BOND_MAX_ARP_TARGETS];
+ int tx_queues;
+ int all_slaves_active;
};
struct bond_parm_tbl {
@@ -159,12 +164,12 @@ struct slave {
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
s8 state; /* one of BOND_STATE_XXXX */
- u32 original_flags;
u32 original_mtu;
u32 link_failure_count;
u8 perm_hwaddr[ETH_ALEN];
u16 speed;
u8 duplex;
+ u16 queue_id;
struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
struct tlb_slave_info tlb_info;
};
@@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave)
struct bonding *bond = netdev_priv(slave->dev->master);
if (!bond_is_lb(bond))
slave->state = BOND_STATE_BACKUP;
- slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+ if (!bond->params.all_slaves_active)
+ slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
if (slave_do_arp_validate(bond, slave))
slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 09257ca8f563..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
struct ser_device *ser;
int ret;
u8 *p;
+
ser = tty->disc_data;
/*
@@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser)
struct tty_struct *tty;
struct sk_buff *skb;
int tty_wr, len, room;
+
tty = ser->tty;
ser->tx_started = true;
@@ -281,6 +283,7 @@ error:
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
+
BUG_ON(dev == NULL);
ser = netdev_priv(dev);
@@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
struct ser_device *ser;
+
ser = tty->disc_data;
BUG_ON(ser == NULL);
BUG_ON(ser->tty != tty);
@@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty)
struct ser_device *ser = tty->disc_data;
/* Remove may be called inside or outside of rtnl_lock */
int islocked = rtnl_is_locked();
+
if (!islocked)
rtnl_lock();
/* device is freed automagically by net-sysfs */
@@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = {
static int register_ldisc(void)
{
int result;
+
result = tty_register_ldisc(N_CAIF, &caif_ldisc);
if (result < 0) {
pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
@@ -391,12 +397,12 @@ static const struct net_device_ops netdev_ops = {
static void caifdev_setup(struct net_device *dev)
{
struct ser_device *serdev = netdev_priv(dev);
+
dev->features = 0;
dev->netdev_ops = &netdev_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CAIF_MAX_MTU;
- dev->hard_header_len = CAIF_NEEDED_HEADROOM;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
skb_queue_head_init(&serdev->head);
@@ -410,8 +416,6 @@ static void caifdev_setup(struct net_device *dev)
static int caif_net_open(struct net_device *dev)
{
- struct ser_device *ser;
- ser = netdev_priv(dev);
netif_wake_queue(dev);
return 0;
}
@@ -425,6 +429,7 @@ static int caif_net_close(struct net_device *dev)
static int __init caif_ser_init(void)
{
int ret;
+
ret = register_ldisc();
debugfsdir = debugfs_create_dir("caif_serial", NULL);
return ret;
@@ -435,6 +440,7 @@ static void __exit caif_ser_exit(void)
struct ser_device *ser = NULL;
struct list_head *node;
struct list_head *_tmp;
+
list_for_each_safe(node, _tmp, &ser_list) {
ser = list_entry(node, struct ser_device, node);
dev_close(ser->dev);
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 4ff966473bc9..b43e9f5d3268 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -227,7 +227,7 @@ struct mscan_regs {
u16 time; /* + 0x7c 0x3e */
} tx;
_MSCAN_RESERVED_(32, 2); /* + 0x7e */
-} __attribute__ ((packed));
+} __packed;
#undef _MSCAN_RESERVED_
#define MSCAN_REGION sizeof(struct mscan)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 1fc0871d2ef7..e75f1a876972 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -197,7 +197,7 @@ struct cpc_can_err_counter {
};
/* Main message type used between library and application */
-struct __attribute__ ((packed)) ems_cpc_msg {
+struct __packed ems_cpc_msg {
u8 type; /* type of message */
u8 length; /* length of data within union 'msg' */
u8 msgid; /* confirmation handle */
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 036b2dfb1d40..092f31a126e6 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -286,7 +286,6 @@ struct board_info {
unsigned int clock_mc3;
unsigned int clock_mc4;
unsigned int espi_nports;
- unsigned int clock_cspi;
unsigned int clock_elmer0;
unsigned char mdio_mdien;
unsigned char mdio_mdiinv;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 53bde15fc94d..599d178df62d 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter)
return 0;
}
-#ifdef CONFIG_CHELSIO_T1_COUGAR
-#include "cspi.h"
-#endif
#ifdef CONFIG_CHELSIO_T1_1G
#include "fpga_defs.h"
@@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
}
-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
/*
* Elmer MI1 MDIO read/write operations.
*/
@@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
return 0;
}
-#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR)
+#if defined(CONFIG_CHELSIO_T1_1G)
static const struct mdio_ops mi1_mdio_ops = {
.init = mi1_mdio_init,
.read = mi1_mdio_read,
@@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
mod_detect ? "removed" : "inserted");
}
break;
-#ifdef CONFIG_CHELSIO_T1_COUGAR
- case CHBT_BOARD_COUGAR:
- if (adapter->params.nports == 1) {
- if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */
- struct cmac *mac = adapter->port[0].mac;
- mac->ops->interrupt_handler(mac);
- }
- if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
- }
- } else {
- int i, port_bit;
-
- for_each_port(adapter, i) {
- port_bit = i ? i + 1 : 0;
- if (!(cause & (1 << port_bit)))
- continue;
-
- phy = adapter->port[i].phy;
- phy_cause = phy->ops->interrupt_handler(phy);
- if (phy_cause & cphy_cause_link_change)
- t1_link_changed(adapter, i);
- }
- }
- break;
-#endif
}
t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
return 0;
@@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
case CHBT_BOARD_N110:
case CHBT_BOARD_N210:
case CHBT_BOARD_CHT210:
- case CHBT_BOARD_COUGAR:
t1_tpi_par(adapter, 0xf);
t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
break;
@@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter)
adapter->regs + A_MC5_CONFIG);
}
-#ifdef CONFIG_CHELSIO_T1_COUGAR
- if (adapter->cspi && t1_cspi_init(adapter->cspi))
- goto out_err;
-#endif
if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
bi->espi_nports))
goto out_err;
@@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter)
t1_tp_destroy(adapter->tp);
if (adapter->espi)
t1_espi_destroy(adapter->espi);
-#ifdef CONFIG_CHELSIO_T1_COUGAR
- if (adapter->cspi)
- t1_cspi_destroy(adapter->cspi);
-#endif
}
static void __devinit init_link_config(struct link_config *lc,
@@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc,
}
}
-#ifdef CONFIG_CHELSIO_T1_COUGAR
- if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
- pr_err("%s: CSPI initialization failed\n",
- adapter->name);
- goto error;
- }
-#endif
-
/*
* Allocate and initialize the data structures that hold the SW state of
* the Terminator HW modules.
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 80471269977a..04e299f46455 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2824,7 +2824,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
err = ip_route_output_key(&init_net, &rt, &fl);
if (!err)
- *dst = &rt->u.dst;
+ *dst = &rt->dst;
return err;
#else
return -ENETUNREACH;
@@ -2996,7 +2996,7 @@ err_out:
static int cnic_cm_abort(struct cnic_sock *csk)
{
struct cnic_local *cp = csk->dev->cnic_priv;
- u32 opcode;
+ u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
if (!cnic_in_use(csk))
return -EINVAL;
@@ -3008,12 +3008,9 @@ static int cnic_cm_abort(struct cnic_sock *csk)
* connect was not successful.
*/
- csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
- if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
- opcode = csk->state;
- else
- opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
cp->close_conn(csk, opcode);
+ if (csk->state != opcode)
+ return -EALREADY;
return 0;
}
@@ -3026,6 +3023,8 @@ static int cnic_cm_close(struct cnic_sock *csk)
if (cnic_close_prep(csk)) {
csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
return cnic_cm_close_req(csk);
+ } else {
+ return -EALREADY;
}
return 0;
}
@@ -3141,12 +3140,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
- if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
- cnic_cm_upcall(cp, csk, opcode);
- break;
- } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
- csk->state = opcode;
- /* fall through */
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
@@ -3202,19 +3195,22 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
- if ((opcode == csk->state) ||
- (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
- csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
- if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
- return 1;
+ if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+ /* Unsolicited RESET_COMP or RESET_RECEIVED */
+ opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ csk->state = opcode;
}
- /* 57710+ only workaround to handle unsolicited RESET_COMP
- * which will be treated like a RESET RCVD notification
- * which triggers the clean up procedure
+
+ /* 1. If event opcode matches the expected event in csk->state
+ * 2. If the expected event is CLOSE_COMP, we accept any event
+	 * 3. If the expected event is 0, meaning the connection was never
+	 *    established, we accept the opcode from cm_abort.
*/
- else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+ if (opcode == csk->state || csk->state == 0 ||
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
- csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ if (csk->state == 0)
+ csk->state = opcode;
return 1;
}
}
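
[Editor's note] The comment block above reduces the cnic close handling to three acceptance rules. As a hedged restatement only (the helper name is invented; the opcode constant comes from the hunks above), the predicate is roughly:

/* An event may start teardown if it matches the expected state, if the
 * expected state is CLOSE_COMP (any close/reset completion is accepted),
 * or if no state was ever established (csk->state == 0, i.e. cm_abort on
 * a connection that never came up).
 */
static bool close_event_accepted(u32 opcode, u32 expected_state)
{
	return opcode == expected_state ||
	       expected_state == 0 ||
	       expected_state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}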
@@ -3226,8 +3222,14 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
+ if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+ cnic_cm_upcall(cp, csk, opcode);
+ return;
+ }
+
clear_bit(SK_F_CONNECT_START, &csk->flags);
cnic_close_conn(csk);
+ csk->state = opcode;
cnic_cm_upcall(cp, csk, opcode);
}
@@ -3257,8 +3259,12 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
- if (cnic_ready_to_close(csk, opcode))
- cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+ if (cnic_ready_to_close(csk, opcode)) {
+ if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+ else
+ close_complete = 1;
+ }
break;
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
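
For reference, a minimal sketch (not driver code) of the acceptance test that cnic_ready_to_close() applies after this change; the opcode constant is the driver's own from cnic_defs.h, everything else here is illustrative:

/* An event may close the connection if it matches the expected state,
 * if any event is acceptable (expected state is CLOSE_COMP), or if the
 * connection was never established (state == 0, the cm_abort case).
 */
static bool demo_event_closes_conn(u32 opcode, u32 state)
{
	return opcode == state ||
	       state == 0 ||
	       state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}
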
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index e3f1b8566495..066fd5b09fda 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2311,15 +2311,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
/* Check t.len sanity ? */
- fw_data = kmalloc(t.len, GFP_KERNEL);
- if (!fw_data)
- return -ENOMEM;
-
- if (copy_from_user
- (fw_data, useraddr + sizeof(t), t.len)) {
- kfree(fw_data);
- return -EFAULT;
- }
+ fw_data = memdup_user(useraddr + sizeof(t), t.len);
+ if (IS_ERR(fw_data))
+ return PTR_ERR(fw_data);
ret = t3_load_fw(adapter, fw_data, t.len);
kfree(fw_data);
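
For reference, a minimal sketch of the memdup_user() pattern the hunk above converts to; the function name and the stand-in validation are illustrative only, not part of the patch:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */
#include <linux/types.h>
#include <linux/uaccess.h>

/* Copy 'len' bytes from user space into a fresh kernel buffer in one call;
 * memdup_user() returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) on failure,
 * so the caller no longer needs separate kmalloc()/copy_from_user() paths.
 */
static int demo_load_blob(const void __user *uptr, size_t len)
{
	u8 *buf;
	int ret;

	buf = memdup_user(uptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = (len && buf[0] != 0xff) ? 0 : -EINVAL;	/* stand-in for real use */
	kfree(buf);
	return ret;
}
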
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index dd1770e075e6..62804bb4022d 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -219,6 +219,10 @@ struct adapter_params {
struct vpd_params vpd;
struct pci_params pci;
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+ unsigned int sf_fw_start; /* start of FW image in flash */
+
unsigned int fw_vers;
unsigned int tp_vers;
u8 api_vers[7];
@@ -305,7 +309,6 @@ enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
- QUEUES_BOUND = (1 << 3),
FW_OK = (1 << 4),
};
@@ -646,6 +649,7 @@ void t4_intr_disable(struct adapter *adapter);
void t4_intr_clear(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
+int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 58045b00cf40..27f65b501a0a 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -216,7 +216,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
static const char *mod_str[] = {
- NULL, "LR", "SR", "ER", "passive DA", "active DA"
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
const struct net_device *dev = adap->port[port_id];
@@ -224,7 +224,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
netdev_info(dev, "port module unplugged\n");
- else
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
@@ -1234,7 +1234,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
unsigned int v = 0;
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
v |= SUPPORTED_TP;
if (caps & FW_PORT_CAP_SPEED_100M)
v |= SUPPORTED_100baseT_Full;
@@ -1250,7 +1251,10 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
v |= SUPPORTED_10000baseKX4_Full;
} else if (type == FW_PORT_TYPE_KR)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- else if (type == FW_PORT_TYPE_FIBER)
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
if (caps & FW_PORT_CAP_ANEG)
@@ -1276,13 +1280,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
const struct port_info *p = netdev_priv(dev);
if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
p->port_type == FW_PORT_TYPE_BT_XAUI)
cmd->port = PORT_TP;
- else if (p->port_type == FW_PORT_TYPE_FIBER)
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_TWINAX)
- cmd->port = PORT_DA;
- else
+ else if (p->port_type == FW_PORT_TYPE_SFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_FIBRE;
+ } else
cmd->port = PORT_OTHER;
if (p->mdio_addr >= 0) {
@@ -2483,6 +2493,7 @@ static void cxgb_down(struct adapter *adapter)
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
adapter->tid_release_task_busy = false;
+ adapter->tid_release_head = NULL;
if (adapter->flags & USING_MSIX) {
free_msix_queue_irqs(adapter);
@@ -2511,9 +2522,10 @@ static int cxgb_open(struct net_device *dev)
}
dev->real_num_tx_queues = pi->nqsets;
- link_start(dev);
- netif_tx_start_all_queues(dev);
- return 0;
+ err = link_start(dev);
+ if (!err)
+ netif_tx_start_all_queues(dev);
+ return err;
}
static int cxgb_close(struct net_device *dev)
@@ -2526,12 +2538,12 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, 0, pi->viid, false, false);
}
-static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev)
{
struct port_stats stats;
struct port_info *p = netdev_priv(dev);
struct adapter *adapter = p->adapter;
- struct net_device_stats *ns = &dev->stats;
+ struct rtnl_link_stats64 *ns = &dev->stats64;
spin_lock(&adapter->stats_lock);
t4_get_port_stats(adapter, p->tx_chan, &stats);
@@ -2674,7 +2686,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
.ndo_start_xmit = t4_eth_xmit,
- .ndo_get_stats = cxgb_get_stats,
+ .ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -2709,6 +2721,65 @@ static void setup_memwin(struct adapter *adap)
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
+static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
+{
+ u32 v;
+ int ret;
+
+ /* get device capabilities */
+ memset(c, 0, sizeof(*c));
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c->retval_len16 = htonl(FW_LEN16(*c));
+ ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
+ if (ret < 0)
+ return ret;
+
+ /* select capabilities we'll be using */
+ if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+ return ret;
+ }
+ c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_config_glbl_rss(adap, 0,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (ret < 0)
+ return ret;
+
+ ret = t4_cfg_pfvf(adap, 0, 0, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4,
+ 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (ret < 0)
+ return ret;
+
+ t4_sge_init(adap);
+
+ /* get basic stuff going */
+ ret = t4_early_init(adap, 0);
+ if (ret < 0)
+ return ret;
+
+ /* tweak some settings */
+ t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+ t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+ v = t4_read_reg(adap, TP_PIO_DATA);
+ t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+ setup_memwin(adap);
+ return 0;
+}
+
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
@@ -2746,43 +2817,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- /* get device capabilities */
- memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_READ);
- c.retval_len16 = htonl(FW_LEN16(c));
- ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
- if (ret < 0)
- goto bye;
-
- /* select capabilities we'll be using */
- if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- goto bye;
- }
- c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST | FW_CMD_WRITE);
- ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
- if (ret < 0)
- goto bye;
-
- ret = t4_config_glbl_rss(adap, 0,
- FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
- if (ret < 0)
- goto bye;
-
- ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
- FW_CMD_CAP_PF, FW_CMD_CAP_PF);
- if (ret < 0)
- goto bye;
-
for (v = 0; v < SGE_NTIMERS - 1; v++)
adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
@@ -2790,16 +2824,19 @@ static int adap_init0(struct adapter *adap)
for (v = 1; v < SGE_NCOUNTERS; v++)
adap->sge.counter_val[v] = min(intr_cnt[v - 1],
THRESHOLD_3_MASK);
- t4_sge_init(adap);
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
- /* get basic stuff going */
- ret = t4_early_init(adap, 0);
+ params[0] = FW_PARAM_DEV(CCLK);
+ ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
if (ret < 0)
goto bye;
+ adap->params.vpd.cclk = val[0];
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ ret = adap_init1(adap, &c);
+ if (ret < 0)
+ goto bye;
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
@@ -2853,6 +2890,18 @@ static int adap_init0(struct adapter *adap)
adap->vres.rq.size = val[3] - val[2] + 1;
adap->vres.pbl.start = val[4];
adap->vres.pbl.size = val[5] - val[4] + 1;
+
+ params[0] = FW_PARAM_PFVF(SQRQ_START);
+ params[1] = FW_PARAM_PFVF(SQRQ_END);
+ params[2] = FW_PARAM_PFVF(CQ_START);
+ params[3] = FW_PARAM_PFVF(CQ_END);
+ ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->vres.qp.start = val[0];
+ adap->vres.qp.size = val[1] - val[0] + 1;
+ adap->vres.cq.start = val[2];
+ adap->vres.cq.size = val[3] - val[2] + 1;
}
if (c.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -2876,14 +2925,6 @@ static int adap_init0(struct adapter *adap)
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
-
- /* tweak some settings */
- t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
- t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
- t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
- v = t4_read_reg(adap, TP_PIO_DATA);
- t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
- setup_memwin(adap);
return 0;
/*
@@ -2896,6 +2937,108 @@ bye: if (ret != -ETIMEDOUT && ret != -EIO)
return ret;
}
+/* EEH callbacks */
+
+static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ goto out;
+
+ rtnl_lock();
+ adap->flags &= ~FW_OK;
+ notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ netif_device_detach(dev);
+ netif_carrier_off(dev);
+ }
+ if (adap->flags & FULL_INIT_DONE)
+ cxgb_down(adap);
+ rtnl_unlock();
+ pci_disable_device(pdev);
+out: return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
+{
+ int i, ret;
+ struct fw_caps_config_cmd c;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap) {
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (t4_wait_dev_ready(adap) < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
+ return PCI_ERS_RESULT_DISCONNECT;
+ adap->flags |= FW_OK;
+ if (adap_init1(adap, &c))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
+ if (ret < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+ p->viid = ret;
+ p->xact_addr_filt = -1;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ if (cxgb_up(adap))
+ return PCI_ERS_RESULT_DISCONNECT;
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void eeh_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct adapter *adap = pci_get_drvdata(pdev);
+
+ if (!adap)
+ return;
+
+ rtnl_lock();
+ for_each_port(adap, i) {
+ struct net_device *dev = adap->port[i];
+
+ if (netif_running(dev)) {
+ link_start(dev);
+ cxgb_set_rxmode(dev);
+ }
+ netif_device_attach(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers cxgb4_eeh = {
+ .error_detected = eeh_err_detected,
+ .slot_reset = eeh_slot_reset,
+ .resume = eeh_resume,
+};
+
static inline bool is_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
@@ -3079,7 +3222,8 @@ static int __devinit enable_msix(struct adapter *adap)
static void __devinit print_port_info(struct adapter *adap)
{
static const char *base[] = {
- "R", "KX4", "T", "KX", "T", "KR", "CX4"
+ "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
+ "KX", "KR", "KR SFP+", "KR FEC"
};
int i;
@@ -3143,8 +3287,10 @@ static int __devinit init_one(struct pci_dev *pdev,
/* We control everything through PF 0 */
func = PCI_FUNC(pdev->devfn);
- if (func > 0)
+ if (func > 0) {
+ pci_save_state(pdev); /* to restore SR-IOV later */
goto sriov;
+ }
err = pci_enable_device(pdev);
if (err) {
@@ -3385,6 +3531,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = __devexit_p(remove_one),
+ .err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 5b98546ac92d..0dc0866df1bf 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -185,6 +185,8 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range stag;
struct cxgb4_range rq;
struct cxgb4_range pbl;
+ struct cxgb4_range qp;
+ struct cxgb4_range cq;
};
/*
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index 9f96724a133a..5b990d24cca9 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -310,6 +310,13 @@ static void t4_l2e_free(struct l2t_entry *e)
neigh_release(e->neigh);
e->neigh = NULL;
}
+ while (e->arpq_head) {
+ struct sk_buff *skb = e->arpq_head;
+
+ e->arpq_head = skb->next;
+ kfree(skb);
+ }
+ e->arpq_tail = NULL;
}
spin_unlock_bh(&e->lock);
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index da272a98fdbc..d92129b6c140 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -221,6 +221,13 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
+ /*
+ * If the device is off-line, as in EEH, commands will time out.
+ * Fail them early so we don't waste time waiting.
+ */
+ if (adap->pdev->error_state != pci_channel_io_normal)
+ return -EIO;
+
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
@@ -449,12 +456,10 @@ enum {
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
- FW_START_SEC = 8, /* first flash sector for FW */
- FW_END_SEC = 15, /* last flash sector for FW */
- FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
- FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+ FW_MAX_SIZE = 512 * 1024,
};
/**
@@ -558,7 +563,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
{
int ret;
- if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
return -EINVAL;
addr = swab32(addr) | SF_RD_DATA_FAST;
@@ -596,7 +601,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
- if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+ if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
val = swab32(addr) | SF_PROG_PAGE;
@@ -614,7 +619,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
if (ret)
goto unlock;
}
- ret = flash_wait_op(adapter, 5, 1);
+ ret = flash_wait_op(adapter, 8, 1);
if (ret)
goto unlock;
@@ -647,9 +652,8 @@ unlock:
*/
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter,
- FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
/**
@@ -661,8 +665,8 @@ static int get_fw_version(struct adapter *adapter, u32 *vers)
*/
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
- tp_microcode_ver),
+ return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
@@ -684,9 +688,9 @@ int t4_check_fw_version(struct adapter *adapter)
if (!ret)
ret = get_tp_version(adapter, &adapter->params.tp_vers);
if (!ret)
- ret = t4_read_flash(adapter,
- FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
- 2, api_vers, 1);
+ ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
+ offsetof(struct fw_hdr, intfver_nic),
+ 2, api_vers, 1);
if (ret)
return ret;
@@ -726,7 +730,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 0, 1,
SF_ERASE_SECTOR | (start << 8))) != 0 ||
- (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+ (ret = flash_wait_op(adapter, 14, 500)) != 0) {
dev_err(adapter->pdev_dev,
"erase of flash sector %d failed, error %d\n",
start, ret);
@@ -754,6 +758,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int fw_img_start = adap->params.sf_fw_start;
+ unsigned int fw_start_sec = fw_img_start / sf_sec_size;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -784,8 +791,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
- ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -796,11 +803,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
- ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = FW_IMG_START;
+ addr = fw_img_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -810,7 +817,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+ fw_img_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@@ -1128,6 +1135,7 @@ static void cim_intr_handler(struct adapter *adapter)
static void ulprx_intr_handler(struct adapter *adapter)
{
static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
{ 0x7fffff, "ULPRX parity error", -1, 1 },
{ 0 }
};
@@ -2572,7 +2580,7 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
- return ntohs(c.viid_pkd);
+ return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
/**
@@ -2595,7 +2603,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
FW_VI_CMD_VFN(vf));
c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
- c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
+ c.type_viid = htons(FW_VI_CMD_VIID(viid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
@@ -3045,7 +3053,7 @@ static void __devinit init_link_config(struct link_config *lc,
}
}
-static int __devinit wait_dev_ready(struct adapter *adap)
+int t4_wait_dev_ready(struct adapter *adap)
{
if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
return 0;
@@ -3053,6 +3061,33 @@ static int __devinit wait_dev_ready(struct adapter *adap)
return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
}
+static int __devinit get_flash_params(struct adapter *adap)
+{
+ int ret;
+ u32 info;
+
+ ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adap, 3, 0, 1, &info);
+ t4_write_reg(adap, SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adap->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adap->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adap->params.sf_size = 1 << info;
+ adap->params.sf_fw_start =
+ t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
+ return 0;
+}
+
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -3066,13 +3101,19 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
{
int ret;
- ret = wait_dev_ready(adapter);
+ ret = t4_wait_dev_ready(adapter);
if (ret < 0)
return ret;
get_pci_mode(adapter, &adapter->params.pci);
adapter->params.rev = t4_read_reg(adapter, PL_REV);
+ ret = get_flash_params(adapter);
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
+ return ret;
+ }
+
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
@@ -3122,12 +3163,13 @@ int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+ adap->port[i]->dev_id = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
- p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
j++;
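
As a worked example of the RDID decode that get_flash_params() above performs (illustrative helper, not part of the patch): byte 0 of the JEDEC ID must be 0x20 (Numonix/ST) and the byte at bits 23:16 is log2 of the part size in bytes, so a density byte of 0x17 gives an 8 MB part with 128 sectors of 64 KB, while 0x18 gives a 16 MB part with 64 sectors of 256 KB.

#include <linux/errno.h>
#include <linux/types.h>

static int demo_decode_rdid(u32 id, unsigned int *size, unsigned int *nsec)
{
	if ((id & 0xff) != 0x20)	/* not a Numonix flash */
		return -EINVAL;
	id >>= 16;			/* log2 of size in bytes */
	if (id >= 0x14 && id < 0x18)
		*nsec = 1 << (id - 16);	/* 64 KB sectors */
	else if (id == 0x18)
		*nsec = 64;		/* 256 KB sectors */
	else
		return -EINVAL;
	*size = 1 << id;
	return 0;
}
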
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 025623285c93..f886677b93ec 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -57,8 +57,6 @@ enum {
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
- SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
- SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
index 5ed56483cbc2..8fed46df886c 100644
--- a/drivers/net/cxgb4/t4_regs.h
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -326,6 +326,9 @@
#define EDC_1_BASE_ADDR 0x7980
+#define CIM_BOOT_CFG 0x7b00
+#define BOOTADDR_MASK 0xffffff00U
+
#define CIM_PF_MAILBOX_DATA 0x240
#define CIM_PF_MAILBOX_CTRL 0x280
#define MBMSGVALID 0x00000008U
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 63991d68950e..111c2a5763e4 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -475,7 +475,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+ FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
+ FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
+ FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+ FW_PARAMS_PARAM_PFVF_VIID = 0x24,
+ FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
};
/*
@@ -804,16 +810,16 @@ struct fw_eq_ofld_cmd {
struct fw_vi_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
- __be16 viid_pkd;
+ __be16 type_viid;
u8 mac[6];
u8 portid_pkd;
u8 nmac;
u8 nmac0[6];
__be16 rsssize_pkd;
u8 nmac1[6];
- __be16 r7;
+ __be16 idsiiq_pkd;
u8 nmac2[6];
- __be16 r8;
+ __be16 idseiq_pkd;
u8 nmac3[6];
__be64 r9;
__be64 r10;
@@ -824,6 +830,7 @@ struct fw_vi_cmd {
#define FW_VI_CMD_ALLOC (1U << 31)
#define FW_VI_CMD_FREE (1U << 30)
#define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_VIID_GET(x) ((x) & 0xfff)
#define FW_VI_CMD_PORTID(x) ((x) << 4)
#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
@@ -1136,6 +1143,11 @@ struct fw_port_cmd {
__be32 lstatus_to_modtype;
__be16 pcap;
__be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 r9;
+ __be32 r10;
+ __be64 r11;
} info;
struct fw_port_ppp {
__be32 pppen_to_ncsich;
@@ -1196,14 +1208,17 @@ struct fw_port_cmd {
#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
enum fw_port_type {
- FW_PORT_TYPE_FIBER,
- FW_PORT_TYPE_KX4,
+ FW_PORT_TYPE_FIBER_XFI,
+ FW_PORT_TYPE_FIBER_XAUI,
FW_PORT_TYPE_BT_SGMII,
- FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_BT_XFI,
FW_PORT_TYPE_BT_XAUI,
- FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_KX4,
FW_PORT_TYPE_CX4,
- FW_PORT_TYPE_TWINAX,
+ FW_PORT_TYPE_KX,
+ FW_PORT_TYPE_KR,
+ FW_PORT_TYPE_SFP,
+ FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
@@ -1213,6 +1228,9 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_LR,
FW_PORT_MOD_TYPE_SR,
FW_PORT_MOD_TYPE_ER,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE,
+ FW_PORT_MOD_TYPE_LRM,
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index abcc838e18af..4fd6b2b4554b 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -961,7 +961,7 @@ struct dm9000_rxhdr {
u8 RxPktReady;
u8 RxStatus;
__le16 RxLen;
-} __attribute__((__packed__));
+} __packed;
/*
* Received a packet and pass to upper layer
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index f654db9121de..a4a0d2b6eb1c 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 4dc02c71ffd6..307a72f483ee 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -359,6 +359,7 @@
#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
@@ -714,6 +715,7 @@
#define BME1000_E_PHY_ID_R2 0x01410CB1
#define I82577_E_PHY_ID 0x01540050
#define I82578_E_PHY_ID 0x004DD040
+#define I82579_E_PHY_ID 0x01540090
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c0b3db40bd73..9ee133f5034e 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -164,6 +164,7 @@ enum e1000_boards {
board_ich9lan,
board_ich10lan,
board_pchlan,
+ board_pch2lan,
};
struct e1000_queue_stats {
@@ -421,6 +422,8 @@ struct e1000_info {
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
#define FLAG2_IS_DISCARDING (1 << 2)
#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
+#define FLAG2_HAS_PHY_STATS (1 << 4)
+#define FLAG2_HAS_EEE (1 << 5)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -458,7 +461,6 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
@@ -476,6 +478,7 @@ extern struct e1000_info e1000_ich8_info;
extern struct e1000_info e1000_ich9_info;
extern struct e1000_info e1000_ich10_info;
extern struct e1000_info e1000_pch_info;
+extern struct e1000_info e1000_pch2_info;
extern struct e1000_info e1000_es2_info;
extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -494,6 +497,8 @@ extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 38d79a669059..45aebb4a6fe1 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 2c521218102b..77c5829ab945 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -118,7 +118,6 @@ static int e1000_get_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status;
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -156,22 +155,29 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- status = er32(STATUS);
- if (status & E1000_STATUS_LU) {
- if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = 1000;
- else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = 100;
- else
- ecmd->speed = 10;
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
- if (status & E1000_STATUS_FD)
- ecmd->duplex = DUPLEX_FULL;
- else
- ecmd->duplex = DUPLEX_HALF;
+ if (netif_running(netdev)) {
+ if (netif_carrier_ok(netdev)) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex - 1;
+ }
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ u32 status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ if (status & E1000_STATUS_SPEED_1000)
+ ecmd->speed = 1000;
+ else if (status & E1000_STATUS_SPEED_100)
+ ecmd->speed = 100;
+ else
+ ecmd->speed = 10;
+
+ if (status & E1000_STATUS_FD)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ }
}
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
@@ -179,7 +185,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
- !hw->mac.get_link_status)
+ netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
ETH_TP_MDI;
else
@@ -191,19 +197,15 @@ static int e1000_get_settings(struct net_device *netdev,
static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_mac_info *mac = &adapter->hw.mac;
+ struct e1000_hw *hw = &adapter->hw;
/*
- * If the link is not reported up to netdev, interrupts are disabled,
- * and so the physical link state may have changed since we last
- * looked. Set get_link_status to make sure that the true link
- * state is interrogated, rather than pulling a cached and possibly
- * stale link state from the driver.
+ * Avoid touching hardware registers when possible, otherwise
+ * link negotiation can get messed up when user-level scripts
+ * are rapidly polling the driver to see if link is up.
*/
- if (!netif_carrier_ok(netdev))
- mac->get_link_status = 1;
-
- return e1000e_has_link(adapter);
+ return netif_running(netdev) ? netif_carrier_ok(netdev) :
+ !!(er32(STATUS) & E1000_STATUS_LU);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -880,6 +882,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
switch (mac->type) {
case e1000_ich10lan:
case e1000_pchlan:
+ case e1000_pch2lan:
mask |= (1 << 18);
break;
default:
@@ -1263,33 +1266,36 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
hw->mac.autoneg = 0;
- /* Workaround: K1 must be disabled for stable 1Gbps operation */
- if (hw->mac.type == e1000_pchlan)
- e1000_configure_k1_ich8lan(hw, false);
-
- if (hw->phy.type == e1000_phy_m88) {
- /* Auto-MDI/MDIX Off */
- e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
- } else if (hw->phy.type == e1000_phy_gg82563)
- e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
-
- ctrl_reg = er32(CTRL);
-
- switch (hw->phy.type) {
- case e1000_phy_ife:
+ if (hw->phy.type == e1000_phy_ife) {
/* force 100, set loopback */
e1e_wphy(hw, PHY_CONTROL, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_100 |/* Force Speed to 100 */
E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ ew32(CTRL, ctrl_reg);
+ udelay(500);
+
+ return 0;
+ }
+
+ /* Specific PHY configuration for loopback */
+ switch (hw->phy.type) {
+ case e1000_phy_m88:
+ /* Auto-MDI/MDIX Off */
+ e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ break;
+ case e1000_phy_gg82563:
+ e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
break;
case e1000_phy_bm:
/* Set Default MAC Interface speed to 1GB */
@@ -1312,23 +1318,41 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Set Early Link Enable */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
- /* fall through */
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82578:
+ /* Workaround: K1 must be disabled for stable 1Gbps operation */
+ e1000_configure_k1_ich8lan(hw, false);
+ break;
+ case e1000_phy_82579:
+ /* Disable PHY energy detect power down */
+ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ /* Disable full chip energy detect */
+ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
+ /* Enable loopback on the PHY */
+#define I82577_PHY_LBK_CTRL 19
+ e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
+ break;
default:
- /* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
- mdelay(250);
+ break;
+ }
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = er32(CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ /* force 1000, set loopback */
+ e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ mdelay(250);
- if (adapter->flags & FLAG_IS_ICH)
- ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
- }
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
+
+ if (adapter->flags & FLAG_IS_ICH)
+ ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
if (hw->phy.media_type == e1000_media_type_copper &&
hw->phy.type == e1000_phy_m88) {
@@ -1868,6 +1892,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
if ((hw->phy.type == e1000_phy_ife) ||
(hw->mac.type == e1000_pchlan) ||
+ (hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_82583) ||
(hw->mac.type == e1000_82574)) {
INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
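
The new e1000_get_link() policy above can be restated as a small sketch (illustrative only; E1000_STATUS_LU comes from the driver's defines.h): while the interface is running, report the carrier state the watchdog already maintains, and read the STATUS register only when the interface is down, so rapid ethtool polling cannot disturb link negotiation.

static u32 demo_get_link(struct net_device *netdev, u32 status)
{
	if (netif_running(netdev))
		return netif_carrier_ok(netdev);
	return !!(status & E1000_STATUS_LU);	/* one-shot register read result */
}
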
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 5d1220d188d4..0cd569a57f6d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -217,7 +217,10 @@ enum e1e_registers {
E1000_SWSM = 0x05B50, /* SW Semaphore */
E1000_FWSM = 0x05B54, /* FW Semaphore */
E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_CRC_OFFSET = 0x05F50, /* CRC Offset register */
+ E1000_FFLT_DBG = 0x05F04, /* Debug Register */
+ E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
E1000_HICR = 0x08F00, /* Host Interface Control */
};
@@ -303,13 +306,14 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
-#define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x0002
#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
@@ -387,6 +391,8 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM 0x1502
+#define E1000_DEV_ID_PCH2_LV_V 0x1503
#define E1000_REVISION_4 4
@@ -406,6 +412,7 @@ enum e1000_mac_type {
e1000_ich9lan,
e1000_ich10lan,
e1000_pchlan,
+ e1000_pch2lan,
};
enum e1000_media_type {
@@ -442,6 +449,7 @@ enum e1000_phy_type {
e1000_phy_bm,
e1000_phy_82578,
e1000_phy_82577,
+ e1000_phy_82579,
};
enum e1000_bus_width {
@@ -929,6 +937,7 @@ struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
+ bool eee_disable;
};
struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index b2507d93de99..6b5e108bb51f 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,8 @@
* 82577LC Gigabit Network Connection
* 82578DM Gigabit Network Connection
* 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
*/
#include "e1000.h"
@@ -126,6 +128,13 @@
#define HV_SMB_ADDR_PEC_EN 0x0200
#define HV_SMB_ADDR_VALID 0x0080
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -226,6 +235,8 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -277,13 +288,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. If the manageability engine (ME) is
+ * disabled, then toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode.
+ */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- /*
- * The MAC-PHY interconnect may still be in SMBus mode
- * after Sx->S0. Toggle the LANPHYPC Value bit to force
- * the interconnect to PCIe mode, but only if there is no
- * firmware present otherwise firmware will have done it.
- */
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -324,6 +335,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
switch (phy->type) {
case e1000_phy_82577:
+ case e1000_phy_82579:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
e1000_phy_force_speed_duplex_82577;
@@ -515,6 +527,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
mac->ops.id_led_init = e1000e_id_led_init;
/* setup LED */
@@ -526,6 +540,9 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->ops.led_off = e1000_led_off_ich8lan;
break;
case e1000_pchlan:
+ case e1000_pch2lan:
+ /* check management mode */
+ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_pchlan;
/* setup LED */
@@ -544,10 +561,47 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+ /* Disable PHY configuration by hardware, config by software */
+ if (mac->type == e1000_pch2lan) {
+ u32 extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ }
+
return 0;
}
/**
+ * e1000_set_eee_pchlan - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure. The bits in
+ * the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if (hw->phy.type != e1000_phy_82579)
+ goto out;
+
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (hw->dev_spec.ich8lan.eee_disable)
+ phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+ else
+ phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+out:
+ return ret_val;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -604,6 +658,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
*/
e1000e_check_downshift(hw);
+ /* Enable/Disable EEE after link up */
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ goto out;
+
/*
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -647,10 +706,19 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- if (hw->mac.type == e1000_pchlan)
- rc = e1000_init_phy_params_pchlan(hw);
- else
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ case e1000_ich9lan:
+ case e1000_ich10lan:
rc = e1000_init_phy_params_ich8lan(hw);
+ break;
+ case e1000_pchlan:
+ case e1000_pch2lan:
+ rc = e1000_init_phy_params_pchlan(hw);
+ break;
+ default:
+ break;
+ }
if (rc)
return rc;
@@ -774,7 +842,7 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
* e1000_check_mng_mode_ich8lan - Checks management mode
* @hw: pointer to the HW structure
*
- * This checks if the adapter has manageability enabled.
+ * This checks if the adapter has any manageability enabled.
* This is a function pointer entry point only called by read/write
* routines for the PHY and NVM parts.
**/
@@ -783,9 +851,26 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_check_mng_mode_pchlan - Checks management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has iAMT enabled.
+ * This is a function pointer entry point only called by read/write
+ * routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
- return (fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+ fwsm = er32(FWSM);
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -820,14 +905,6 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
- if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
- !(hw->mac.type == e1000_pchlan))
- return ret_val;
-
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
@@ -835,12 +912,27 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
+ switch (hw->mac.type) {
+ case e1000_ich8lan:
+ if (phy->type != e1000_phy_igp_3)
+ return ret_val;
+
+ if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) {
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ }
+ /* Fall-thru */
+ case e1000_pchlan:
+ case e1000_pch2lan:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ break;
+ default:
+ return ret_val;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
data = er32(FEXTNVM);
if (!(data & sw_cfg_mask))
@@ -851,8 +943,10 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+ }
cnf_size = er32(EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -864,7 +958,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
+ ((hw->mac.type == e1000_pchlan) ||
+ (hw->mac.type == e1000_pch2lan))) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
@@ -1071,16 +1166,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
u32 mac_reg;
u16 oem_reg;
- if (hw->mac.type != e1000_pchlan)
+ if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- mac_reg = er32(EXTCNF_CTRL);
- if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
- goto out;
+ if (!(hw->mac.type == e1000_pch2lan)) {
+ mac_reg = er32(EXTCNF_CTRL);
+ if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+ goto out;
+ }
mac_reg = er32(FEXTNVM);
if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
@@ -1221,6 +1318,243 @@ out:
}
/**
+ * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ * @hw: pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+ u16 i;
+
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ mac_reg = er32(RAL(i));
+ e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ mac_reg = er32(RAH(i));
+ e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
+ e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+ }
+}
+
+static u32 e1000_calc_rx_da_crc(u8 mac[])
+{
+ u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
+ u32 i, j, mask, crc;
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ crc = crc ^ mac[i];
+ for (j = 8; j > 0; j--) {
+ mask = (crc & 1) * (-1);
+ crc = (crc >> 1) ^ (poly & mask);
+ }
+ }
+ return ~crc;
+}
+
+/**
+ * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ * with 82579 PHY
+ * @hw: pointer to the HW structure
+ * @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+ s32 ret_val = 0;
+ u16 phy_reg, data;
+ u32 mac_reg;
+ u16 i;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* disable Rx path while enabling/disabling workaround */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ if (ret_val)
+ goto out;
+
+ if (enable) {
+ /*
+ * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ * SHRAL/H) and initial CRC values to the MAC
+ */
+ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ u8 mac_addr[ETH_ALEN] = {0};
+ u32 addr_high, addr_low;
+
+ addr_high = er32(RAH(i));
+ if (!(addr_high & E1000_RAH_AV))
+ continue;
+ addr_low = er32(RAL(i));
+ mac_addr[0] = (addr_low & 0xFF);
+ mac_addr[1] = ((addr_low >> 8) & 0xFF);
+ mac_addr[2] = ((addr_low >> 16) & 0xFF);
+ mac_addr[3] = ((addr_low >> 24) & 0xFF);
+ mac_addr[4] = (addr_high & 0xFF);
+ mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
+ ew32(PCH_RAICC(i),
+ e1000_calc_rx_da_crc(mac_addr));
+ }
+
+ /* Write Rx addresses to the PHY */
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+ /* Enable jumbo frame workaround in the MAC */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(1 << 14);
+ mac_reg |= (7 << 15);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg |= E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data | (1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Enable jumbo frame workaround in the PHY */
+ e1e_rphy(hw, PHY_REG(769, 20), &data);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ data |= (0x37 << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 13);
+ data |= (1 << 12);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x1A << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xFE00);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ if (ret_val)
+ goto out;
+ } else {
+ /* Write MAC register values back to h/w defaults */
+ mac_reg = er32(FFLT_DBG);
+ mac_reg &= ~(0xF << 14);
+ ew32(FFLT_DBG, mac_reg);
+
+ mac_reg = er32(RCTL);
+ mac_reg &= ~E1000_RCTL_SECRC;
+ ew32(RCTL, mac_reg);
+
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ data & ~(1 << 0));
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_read_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
+ if (ret_val)
+ goto out;
+ data &= ~(0xF << 8);
+ data |= (0xB << 8);
+ ret_val = e1000e_write_kmrn_reg(hw,
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ data);
+ if (ret_val)
+ goto out;
+
+ /* Write PHY register values back to h/w defaults */
+ e1e_rphy(hw, PHY_REG(769, 20), &data);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 23), &data);
+ data &= ~(0x7F << 5);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(769, 16), &data);
+ data &= ~(1 << 12);
+ data |= (1 << 13);
+ ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, PHY_REG(776, 20), &data);
+ data &= ~(0x3FF << 2);
+ data |= (0x8 << 2);
+ ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+ if (ret_val)
+ goto out;
+ ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+ if (ret_val)
+ goto out;
+ e1e_rphy(hw, HV_PM_CTRL, &data);
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ if (ret_val)
+ goto out;
+ }
+
+ /* re-enable Rx path after enabling/disabling workaround */
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
+ * done after every PHY reset.
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ if (hw->mac.type != e1000_pch2lan)
+ goto out;
+
+ /* Set MDIO slow mode before any other MDIO access */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_lan_init_done_ich8lan - Check for PHY config completion
* @hw: pointer to the HW structure
*
@@ -1271,12 +1605,17 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto out;
break;
+ case e1000_pch2lan:
+ ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ break;
default:
break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
+ if (hw->mac.type >= e1000_pchlan)
e1e_rphy(hw, BM_WUC, &reg);
/* Configure the LCD with the extended configuration region in NVM */
@@ -2800,6 +3139,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
ew32(FCRTV_PCH, hw->fc.refresh_time);
@@ -2863,6 +3203,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
break;
case e1000_phy_82577:
+ case e1000_phy_82579:
ret_val = e1000_copper_link_setup_82577(hw);
if (ret_val)
return ret_val;
@@ -3116,21 +3457,12 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
- switch (hw->mac.type) {
- case e1000_ich8lan:
- case e1000_ich9lan:
- case e1000_ich10lan:
- case e1000_pchlan:
- phy_ctrl = er32(PHY_CTRL);
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
- E1000_PHY_CTRL_GBE_DISABLE;
- ew32(PHY_CTRL, phy_ctrl);
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+ ew32(PHY_CTRL, phy_ctrl);
- if (hw->mac.type == e1000_pchlan)
- e1000_phy_hw_reset_ich8lan(hw);
- default:
- break;
- }
+ if (hw->mac.type >= e1000_pchlan)
+ e1000_phy_hw_reset_ich8lan(hw);
}
/**
@@ -3370,6 +3702,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
/* Clear PHY statistics registers */
if ((hw->phy.type == e1000_phy_82578) ||
+ (hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
@@ -3390,7 +3723,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
static struct e1000_mac_operations ich8_mac_ops = {
.id_led_init = e1000e_id_led_init,
- .check_mng_mode = e1000_check_mng_mode_ich8lan,
+ /* check_mng_mode dependent on mac type */
.check_for_link = e1000_check_for_copper_link_ich8lan,
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
@@ -3497,6 +3830,7 @@ struct e1000_info e1000_pch_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS,
.pba = 26,
.max_hw_frame_size = 4096,
.get_variants = e1000_get_variants_ich8lan,
@@ -3504,3 +3838,23 @@ struct e1000_info e1000_pch_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,
};
+
+struct e1000_info e1000_pch2_info = {
+ .mac = e1000_pch2lan,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_RX_CSUM_ENABLED
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 18,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &ich8_nvm_ops,
+};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a968e3a416ac..df4a27922931 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57a7e41da69e..71592ed2e686 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,9 @@
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k4"
+#define DRV_EXTRAVERSION "-k2"
+
+#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -67,6 +69,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_ich9lan] = &e1000_ich9_info,
[board_ich10lan] = &e1000_ich10_info,
[board_pchlan] = &e1000_pch_info,
+ [board_pch2lan] = &e1000_pch2_info,
};
struct e1000_reg_info {
@@ -2723,6 +2726,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_wphy(hw, 22, phy_data);
}
+ /* Workaround Si errata on 82579 - configure jumbo frame flow */
+ if (hw->mac.type == e1000_pch2lan) {
+ s32 ret_val;
+
+ if (rctl & E1000_RCTL_LPE)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+ }
+
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
@@ -2759,7 +2772,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
* per packet.
*/
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+ if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) &&
(PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
@@ -3118,7 +3131,27 @@ void e1000e_reset(struct e1000_adapter *adapter)
* with ERT support assuming ERT set to E1000_ERT_2048), or
* - the full Rx FIFO size minus one full frame
*/
- if (hw->mac.type == e1000_pchlan) {
+ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+ fc->pause_time = 0xFFFF;
+ else
+ fc->pause_time = E1000_FC_PAUSE_TIME;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
+
+ switch (hw->mac.type) {
+ default:
+ if ((adapter->flags & FLAG_HAS_ERT) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN))
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - (E1000_ERT_2048 << 3)));
+ else
+ hwm = min(((pba << 10) * 9 / 10),
+ ((pba << 10) - adapter->max_frame_size));
+
+ fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+ fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pchlan:
/*
* Workaround PCH LOM adapter hangs with certain network
* loads. If hangs persist, try disabling Tx flow control.
@@ -3131,26 +3164,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = 0x3000;
}
fc->refresh_time = 0x1000;
- } else {
- if ((adapter->flags & FLAG_HAS_ERT) &&
- (adapter->netdev->mtu > ETH_DATA_LEN))
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - (E1000_ERT_2048 << 3)));
- else
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - adapter->max_frame_size));
-
- fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
+ break;
+ case e1000_pch2lan:
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ fc->refresh_time = 0x0400;
+ break;
}
- if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
- fc->pause_time = 0xFFFF;
- else
- fc->pause_time = E1000_FC_PAUSE_TIME;
- fc->send_xon = 1;
- fc->current_mode = fc->requested_mode;
-
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -3162,8 +3184,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000_get_hw_control(adapter);
ew32(WUC, 0);
- if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)
- e1e_wphy(&adapter->hw, BM_WUC, 0);
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
@@ -3672,6 +3692,110 @@ static void e1000_update_phy_info(unsigned long data)
}
/**
+ * e1000e_update_phy_stats - Update the PHY statistics counters
+ * @adapter: board private structure
+ **/
+static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ s32 ret_val;
+ u16 phy_data;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+
+ hw->phy.addr = 1;
+
+#define HV_PHY_STATS_PAGE 778
+ /*
+ * A page set is expensive so check if already on desired page.
+ * If not, set to the page with the PHY status registers.
+ */
+ ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ &phy_data);
+ if (ret_val)
+ goto release;
+ if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (HV_PHY_STATS_PAGE <<
+ IGP_PAGE_SHIFT));
+ if (ret_val)
+ goto release;
+ }
+
+ /* Read/clear the upper 16-bit registers and read/accumulate lower */
+
+ /* Single Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.scc += phy_data;
+
+ /* Excessive Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.ecol += phy_data;
+
+ /* Multiple Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.mcc += phy_data;
+
+ /* Late Collision Count */
+ e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_LATECOL_LOWER &
+ MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.latecol += phy_data;
+
+ /* Collision Count - also used for adaptive IFS */
+ e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ hw->mac.collision_delta = phy_data;
+
+ /* Defer Count */
+ e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.dc += phy_data;
+
+ /* Transmit with no CRS */
+ e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
+ &phy_data);
+ if (!ret_val)
+ adapter->stats.tncrs += phy_data;
+
+release:
+ hw->phy.ops.release(hw);
+}
+
+/**
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
@@ -3680,7 +3804,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- u16 phy_data;
/*
* Prevent stats update while adapter is being reset, or if the pci
@@ -3700,34 +3823,27 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.roc += er32(ROC);
adapter->stats.mpc += er32(MPC);
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
- adapter->stats.scc += phy_data;
-
- e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
- adapter->stats.ecol += phy_data;
-
- e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
- adapter->stats.mcc += phy_data;
-
- e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
- adapter->stats.latecol += phy_data;
-
- e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
- adapter->stats.dc += phy_data;
- } else {
- adapter->stats.scc += er32(SCC);
- adapter->stats.ecol += er32(ECOL);
- adapter->stats.mcc += er32(MCC);
- adapter->stats.latecol += er32(LATECOL);
- adapter->stats.dc += er32(DC);
+
+ /* Half-duplex statistics */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
+ e1000e_update_phy_stats(adapter);
+ } else {
+ adapter->stats.scc += er32(SCC);
+ adapter->stats.ecol += er32(ECOL);
+ adapter->stats.mcc += er32(MCC);
+ adapter->stats.latecol += er32(LATECOL);
+ adapter->stats.dc += er32(DC);
+
+ hw->mac.collision_delta = er32(COLC);
+
+ if ((hw->mac.type != e1000_82574) &&
+ (hw->mac.type != e1000_82583))
+ adapter->stats.tncrs += er32(TNCRS);
+ }
+ adapter->stats.colc += hw->mac.collision_delta;
}
+
adapter->stats.xonrxc += er32(XONRXC);
adapter->stats.xontxc += er32(XONTXC);
adapter->stats.xoffrxc += er32(XOFFRXC);
@@ -3745,28 +3861,9 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
hw->mac.tx_packet_delta = er32(TPT);
adapter->stats.tpt += hw->mac.tx_packet_delta;
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
- hw->mac.collision_delta = phy_data;
- } else {
- hw->mac.collision_delta = er32(COLC);
- }
- adapter->stats.colc += hw->mac.collision_delta;
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.rxerrc += er32(RXERRC);
- if ((hw->phy.type == e1000_phy_82578) ||
- (hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
- adapter->stats.tncrs += phy_data;
- } else {
- if ((hw->mac.type != e1000_82574) &&
- (hw->mac.type != e1000_82583))
- adapter->stats.tncrs += er32(TNCRS);
- }
adapter->stats.cexterr += er32(CEXTERR);
adapter->stats.tsctc += er32(TSCTC);
adapter->stats.tsctfc += er32(TSCTFC);
@@ -3865,7 +3962,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000e_has_link(struct e1000_adapter *adapter)
+static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -4841,14 +4938,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
int retval = 0;
/* copy MAC RARs to PHY RARs */
- for (i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
- mac_reg = er32(RAL(i));
- e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
- mac_reg = er32(RAH(i));
- e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0xFFFF));
- }
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
/* copy MAC MTA to PHY MTA */
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5899,6 +5989,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
@@ -5935,7 +6028,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
+ pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a150e48a117f..593251c78c6f 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -161,6 +161,15 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea
E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
"the CRC");
+/*
+ * Enable/disable EEE (a.k.a. IEEE802.3az)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(EEE, "Enable/disable on parts that support the feature");
+
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -477,4 +486,23 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
}
+ { /* EEE for parts supporting the feature */
+ static const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "EEE Support",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (adapter->flags2 & FLAG2_HAS_EEE) {
+ /* Currently only supported on 82579 */
+ if (num_EEE > bd) {
+ unsigned int eee = EEE[bd];
+ e1000_validate_option(&eee, &opt, adapter);
+ hw->dev_spec.ich8lan.eee_disable = !eee;
+ } else {
+ hw->dev_spec.ich8lan.eee_disable = !opt.def;
+ }
+ }
+ }
}
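
The new EEE option above follows the driver's existing enable_option pattern: a per-board value from the EEE[] module-parameter array is validated as 0/1 and then inverted into the eee_disable flag that the 82579-specific code consumes. The following is an illustrative sketch only, with assumed helper names that are not part of the patch, showing how that mapping behaves:

	/*
	 * Sketch (hypothetical names): map the 0/1 EEE module parameter
	 * onto the eee_disable flag, mirroring the assignment
	 * hw->dev_spec.ich8lan.eee_disable = !eee in the hunk above.
	 */
	static void apply_eee_param_sketch(unsigned int eee_param_value,
					   bool *eee_disable)
	{
		/* EEE=1 (the default) leaves EEE enabled; EEE=0 disables it */
		*eee_disable = (eee_param_value == 0);
	}

Under those assumptions, loading the module with EEE=0 for a given adapter would disable EEE on 82579 parts, while boards without FLAG2_HAS_EEE skip the option entirely, as the flags2 check above shows.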
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b4ac82d51b20..3d3dc0c82355 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2319,6 +2319,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
case I82577_E_PHY_ID:
phy_type = e1000_phy_82577;
break;
+ case I82579_E_PHY_ID:
+ phy_type = e1000_phy_82579;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 882c50c9c34f..f608a6c54af5 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -126,7 +126,7 @@ struct ehea_swqe {
u8 immediate_data[SWQE2_MAX_IMM];
/* 0xd0 */
struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
- } immdata_desc __attribute__ ((packed));
+ } immdata_desc __packed;
/* Send WQE Format 3 */
struct {
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index d769772998c6..0a35085004d0 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -25,9 +25,10 @@
struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
{
- struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+ struct vic_provinfo *vp;
- if (!vp || !oui)
+ vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+ if (!vp)
return NULL;
memcpy(vp->oui, oui, sizeof(vp->oui));
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 085c2a274cb1..7e46e5e8600f 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -44,7 +44,7 @@ struct vic_provinfo {
u16 length;
u8 value[0];
} tlv[0];
-} __attribute__ ((packed));
+} __packed;
#define VIC_PROVINFO_MAX_DATA 1385
#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6ed2df14ec84..37ce8aca2cc6 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @dty_tx: last buffer actually sent
* @num_rx: number of receive buffers
* @cur_rx: current receive buffer
+ * @vma: pointer to array of virtual memory addresses for buffers
* @netdev: pointer to network device structure
* @napi: NAPI structure
* @stats: network device statistics
@@ -203,6 +204,8 @@ struct ethoc {
unsigned int num_rx;
unsigned int cur_rx;
+ void **vma;
+
struct net_device *netdev;
struct napi_struct napi;
struct net_device_stats stats;
@@ -285,18 +288,22 @@ static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
ethoc_write(dev, MODER, mode);
}
-static int ethoc_init_ring(struct ethoc *dev)
+static int ethoc_init_ring(struct ethoc *dev, void *mem_start)
{
struct ethoc_bd bd;
int i;
+ void *vma;
dev->cur_tx = 0;
dev->dty_tx = 0;
dev->cur_rx = 0;
+ ethoc_write(dev, TX_BD_NUM, dev->num_tx);
+
/* setup transmission buffers */
- bd.addr = virt_to_phys(dev->membase);
+ bd.addr = mem_start;
bd.stat = TX_BD_IRQ | TX_BD_CRC;
+ vma = dev->membase;
for (i = 0; i < dev->num_tx; i++) {
if (i == dev->num_tx - 1)
@@ -304,6 +311,9 @@ static int ethoc_init_ring(struct ethoc *dev)
ethoc_write_bd(dev, i, &bd);
bd.addr += ETHOC_BUFSIZ;
+
+ dev->vma[i] = vma;
+ vma += ETHOC_BUFSIZ;
}
bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
@@ -314,6 +324,9 @@ static int ethoc_init_ring(struct ethoc *dev)
ethoc_write_bd(dev, dev->num_tx + i, &bd);
bd.addr += ETHOC_BUFSIZ;
+
+ dev->vma[dev->num_tx + i] = vma;
+ vma += ETHOC_BUFSIZ;
}
return 0;
@@ -415,7 +428,7 @@ static int ethoc_rx(struct net_device *dev, int limit)
skb = netdev_alloc_skb_ip_align(dev, size);
if (likely(skb)) {
- void *src = phys_to_virt(bd.addr);
+ void *src = priv->vma[entry];
memcpy_fromio(skb_put(skb, size), src, size);
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
@@ -600,8 +613,11 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
while (time_before(jiffies, timeout)) {
u32 stat = ethoc_read(priv, MIISTATUS);
- if (!(stat & MIISTATUS_BUSY))
+ if (!(stat & MIISTATUS_BUSY)) {
+ /* reset MII command register */
+ ethoc_write(priv, MIICOMMAND, 0);
return 0;
+ }
schedule();
}
@@ -622,21 +638,12 @@ static int ethoc_mdio_probe(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
struct phy_device *phy;
- int i;
+ int err;
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- phy = priv->mdio->phy_map[i];
- if (phy) {
- if (priv->phy_id != -1) {
- /* attach to specified PHY */
- if (priv->phy_id == phy->addr)
- break;
- } else {
- /* autoselect PHY if none was specified */
- if (phy->addr != 0)
- break;
- }
- }
+ if (priv->phy_id != -1) {
+ phy = priv->mdio->phy_map[priv->phy_id];
+ } else {
+ phy = phy_find_first(priv->mdio);
}
if (!phy) {
@@ -644,11 +651,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
return -ENXIO;
}
- phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
+ err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
PHY_INTERFACE_MODE_GMII);
- if (IS_ERR(phy)) {
+ if (err) {
dev_err(&dev->dev, "could not attach to PHY\n");
- return PTR_ERR(phy);
+ return err;
}
priv->phy = phy;
@@ -658,8 +665,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
static int ethoc_open(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
- unsigned int min_tx = 2;
- unsigned int num_bd;
int ret;
ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
@@ -667,14 +672,7 @@ static int ethoc_open(struct net_device *dev)
if (ret)
return ret;
- /* calculate the number of TX/RX buffers, maximum 128 supported */
- num_bd = min_t(unsigned int,
- 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
- priv->num_tx = max(min_tx, num_bd / 4);
- priv->num_rx = num_bd - priv->num_tx;
- ethoc_write(priv, TX_BD_NUM, priv->num_tx);
-
- ethoc_init_ring(priv);
+ ethoc_init_ring(priv, (void *)dev->mem_start);
ethoc_reset(priv);
if (netif_queue_stopped(dev)) {
@@ -838,7 +836,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
bd.stat &= ~TX_BD_PAD;
- dest = phys_to_virt(bd.addr);
+ dest = priv->vma[entry];
memcpy_toio(dest, skb->data, skb->len);
bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -884,6 +882,7 @@ static int ethoc_probe(struct platform_device *pdev)
struct resource *mem = NULL;
struct ethoc *priv = NULL;
unsigned int phy;
+ int num_bd;
int ret = 0;
/* allocate networking device */
@@ -965,7 +964,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
} else {
/* Allocate buffer memory */
- priv->membase = dma_alloc_coherent(NULL,
+ priv->membase = dmam_alloc_coherent(&pdev->dev,
buffer_size, (void *)&netdev->mem_start,
GFP_KERNEL);
if (!priv->membase) {
@@ -978,6 +977,18 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = buffer_size;
}
+ /* calculate the number of TX/RX buffers, maximum 128 supported */
+ num_bd = min_t(unsigned int,
+ 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
+ priv->num_tx = max(2, num_bd / 4);
+ priv->num_rx = num_bd - priv->num_tx;
+
+ priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *), GFP_KERNEL);
+ if (!priv->vma) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
/* Allow the platform setup code to pass in a MAC address. */
if (pdev->dev.platform_data) {
struct ethoc_platform_data *pdata =
@@ -1063,21 +1074,6 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
- if (priv) {
- if (priv->dma_alloc)
- dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
- netdev->mem_start);
- else if (priv->membase)
- devm_iounmap(&pdev->dev, priv->membase);
- if (priv->iobase)
- devm_iounmap(&pdev->dev, priv->iobase);
- }
- if (mem)
- devm_release_mem_region(&pdev->dev, mem->start,
- mem->end - mem->start + 1);
- if (mmio)
- devm_release_mem_region(&pdev->dev, mmio->start,
- mmio->end - mmio->start + 1);
free_netdev(netdev);
out:
return ret;
@@ -1104,17 +1100,6 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
- if (priv->dma_alloc)
- dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
- netdev->mem_start);
- else {
- devm_iounmap(&pdev->dev, priv->membase);
- devm_release_mem_region(&pdev->dev, netdev->mem_start,
- netdev->mem_end - netdev->mem_start + 1);
- }
- devm_iounmap(&pdev->dev, priv->iobase);
- devm_release_mem_region(&pdev->dev, netdev->base_addr,
- priv->io_region_size);
unregister_netdev(netdev);
free_netdev(netdev);
}
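
The ethoc changes above replace the phys_to_virt()/virt_to_phys() translation with an explicit priv->vma[] table: the CPU-visible address of each fixed-size buffer slot is recorded once in ethoc_init_ring() and then looked up by descriptor index in ethoc_rx() and ethoc_start_xmit(). A standalone sketch of that bookkeeping, with simplified names assumed for illustration rather than taken from the driver:

	/*
	 * Sketch only: buffer slot i sits at a fixed offset from the ring
	 * base, so the CPU-visible address can be stored at init time and
	 * indexed later instead of translating the bus address.
	 */
	#define SKETCH_BUFSIZ	1536
	#define SKETCH_SLOTS	16

	static void *sketch_vma[SKETCH_SLOTS];

	static void sketch_init_vma(unsigned char *base_virt)
	{
		unsigned int i;

		for (i = 0; i < SKETCH_SLOTS; i++)
			sketch_vma[i] = base_virt + i * SKETCH_BUFSIZ;
	}

The patch's ethoc_init_ring() does the same walk over dev->membase, advancing by ETHOC_BUFSIZ per descriptor, and the data paths then read priv->vma[entry] for memcpy_fromio()/memcpy_toio().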
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index edfff92a6d8e..a3cae4ed6ac9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -210,7 +210,7 @@ static void fec_stop(struct net_device *dev);
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
-static int
+static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -679,30 +679,24 @@ static int fec_enet_mii_probe(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct phy_device *phy_dev = NULL;
- int phy_addr;
+ int ret;
fep->phy_dev = NULL;
/* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (fep->mii_bus->phy_map[phy_addr]) {
- phy_dev = fep->mii_bus->phy_map[phy_addr];
- break;
- }
- }
-
+ phy_dev = phy_find_first(fep->mii_bus);
if (!phy_dev) {
printk(KERN_ERR "%s: no PHY found\n", dev->name);
return -ENODEV;
}
/* attach the mac to the phy */
- phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+ ret = phy_connect_direct(dev, phy_dev,
&fec_enet_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
+ if (ret) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phy_dev);
+ return ret;
}
/* mask with MAC supported features */
@@ -1365,6 +1359,8 @@ fec_drv_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+
static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
@@ -1395,15 +1391,31 @@ fec_resume(struct platform_device *dev)
return 0;
}
+static const struct dev_pm_ops fec_pm_ops = {
+ .suspend = fec_suspend,
+ .resume = fec_resume,
+ .freeze = fec_suspend,
+ .thaw = fec_resume,
+ .poweroff = fec_suspend,
+ .restore = fec_resume,
+};
+
+#define FEC_PM_OPS (&fec_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define FEC_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
static struct platform_driver fec_driver = {
.driver = {
.name = "fec",
.owner = THIS_MODULE,
+ .pm = FEC_PM_OPS,
},
.probe = fec_probe,
.remove = __devexit_p(fec_drv_remove),
- .suspend = fec_suspend,
- .resume = fec_resume,
};
static int __init
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 006f64d9f96a..dbaf72cbb233 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
int reg, u32 value)
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
- struct mpc52xx_fec __iomem *fec;
+ struct mpc52xx_fec __iomem *fec = priv->regs;
int tries = 3;
value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
- fec = priv->regs;
out_be32(&fec->ievent, FEC_IEVENT_MII);
- out_be32(&priv->regs->mii_data, value);
+ out_be32(&fec->mii_data, value);
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
@@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
return -ETIMEDOUT;
return value & FEC_MII_DATA_OP_RD ?
- in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+ in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
}
static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -69,9 +68,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
struct device_node *np = of->dev.of_node;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
- struct resource res = {};
+ struct resource res;
int err;
- int i;
bus = mdiobus_alloc();
if (bus == NULL)
@@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
err = of_address_to_resource(np, 0, &res);
if (err)
goto out_free;
- priv->regs = ioremap(res.start, res.end - res.start + 1);
+ priv->regs = ioremap(res.start, resource_size(&res));
if (priv->regs == NULL) {
err = -ENOMEM;
goto out_free;
@@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
out_unmap:
iounmap(priv->regs);
out_free:
- for (i=0; i<PHY_MAX_ADDR; i++)
- if (bus->irq[i] != PHY_POLL)
- irq_dispose_mapping(bus->irq[i]);
- kfree(bus->irq);
kfree(priv);
mdiobus_free(bus);
@@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
struct device *dev = &of->dev;
struct mii_bus *bus = dev_get_drvdata(dev);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
- int i;
mdiobus_unregister(bus);
dev_set_drvdata(dev, NULL);
-
iounmap(priv->regs);
- for (i=0; i<PHY_MAX_ADDR; i++)
- if (bus->irq[i] != PHY_POLL)
- irq_dispose_mapping(bus->irq[i]);
kfree(priv);
- kfree(bus->irq);
mdiobus_free(bus);
return 0;
}
-
static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "fsl,mpc5200b-mdio", },
{ .compatible = "fsl,mpc5200-mdio", },
@@ -171,5 +158,4 @@ struct of_platform_driver mpc52xx_fec_mdio_driver = {
/* let fec driver call it, since this has to be registered before it */
EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
-
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 1f7d865cedb6..bd17a2a0139b 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -39,7 +39,7 @@ struct fsl_pq_mdio {
u8 reserved[28]; /* Space holder */
u32 utbipar; /* TBI phy address reg (only on UCC) */
u8 res4[2728];
-} __attribute__ ((packed));
+} __packed;
int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 28b53d1cd4f1..c52f3712ecd3 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -685,8 +685,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
priv->rx_queue[i] = NULL;
for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
- sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
if (!priv->tx_queue[i]) {
err = -ENOMEM;
goto tx_alloc_failed;
@@ -698,8 +698,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
}
for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
- sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
if (!priv->rx_queue[i]) {
err = -ENOMEM;
goto rx_alloc_failed;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a029d02c2b4..4d09eab3548e 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1555,7 +1555,6 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
}
/* setup NAPI */
- memset(&greth->napi, 0, sizeof(greth->napi));
netif_napi_add(dev, &greth->napi, greth_poll, 64);
return 0;
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 0dbd1932b72f..36c3060411d2 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -273,7 +273,7 @@ struct OboeSlot
__u8 control; /*Slot control/status see below */
__u32 address; /*Slot buffer address */
}
-__attribute__ ((packed));
+__packed;
#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index ac0443d52e50..58ddb5214916 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -125,7 +125,7 @@ struct irda_class_desc {
__u8 bmAdditionalBOFs;
__u8 bIrdaRateSniff;
__u8 bMaxUnicastList;
-} __attribute__ ((packed));
+} __packed;
/* class specific interface request to get the IrDA-USB class descriptor
* (6.2.5, USB-IrDA class spec 1.0) */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index b54d3b48045e..1046014dd6c2 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -154,7 +154,7 @@ struct ks959_speedparams {
__le32 baudrate; /* baud rate, little endian */
__u8 flags;
__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
#define KS_DATA_5_BITS 0x00
#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 8d713ebac15b..9cc142fcc712 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -117,7 +117,7 @@ struct ksdazzle_speedparams {
__le32 baudrate; /* baud rate, little endian */
__u8 flags;
__u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
#define KS_DATA_5_BITS 0x00
#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3050d1a0cccf..3f24a1f33022 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -544,9 +544,9 @@ struct ring_descr_hw {
struct {
u8 addr_res[3];
volatile u8 status; /* descriptor status */
- } __attribute__((packed)) rd_s;
- } __attribute((packed)) rd_u;
-} __attribute__ ((packed));
+ } __packed rd_s;
+ } __packed rd_u;
+} __packed;
#define rd_addr rd_u.addr
#define rd_status rd_u.rd_s.status
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ffae480587ae..9e15eb93860e 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -44,11 +44,9 @@
#include <linux/dca.h>
#endif
-#define PFX "ixgbe: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args)))
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
@@ -112,7 +110,6 @@ struct vf_data_storage {
u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
- int rar;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a4e2901f2f08..976fd9e146c6 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -707,9 +707,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
out:
if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
- " downgraded the link speed from the maximum"
- " advertised\n");
+ e_info("Smartspeed has downgraded the link speed from "
+ "the maximum advertised\n");
return status;
}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 3080afb12bdf..d5d3aae8524b 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -105,12 +105,26 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-#ifdef DEBUG
-extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
+extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
- printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(hw, format, arg...) do {} while (0)
-#endif
+ netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+#define e_crit(format, arg...) \
+ netdev_crit(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+ dev_err(&adapter->pdev->dev, format, ## arg)
+#define e_dev_notice(format, arg...) \
+ dev_notice(&adapter->pdev->dev, format, ## arg)
#endif /* IXGBE_COMMON */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 71da325dfa80..657623589d53 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
goto out;
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
+ e_err("Enable failed, needs MSI-X\n");
err = 1;
goto out;
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3a93a81872b8..873b45efca40 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -294,8 +294,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
hw->mac.autotry_restart = true;
err = hw->mac.ops.setup_link(hw, advertised, true, true);
if (err) {
- DPRINTK(PROBE, INFO,
- "setup link failed with code %d\n", err);
+ e_info("setup link failed with code %d\n", err);
hw->mac.ops.setup_link(hw, old, true, true);
}
} else {
@@ -1188,9 +1187,9 @@ static struct ixgbe_reg_test reg_test_82598[] = {
writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if (val != (_test[pat] & W & M)) { \
- DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
- "0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
+ e_err("pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
@@ -1206,8 +1205,8 @@ static struct ixgbe_reg_test reg_test_82598[] = {
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
- DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
- "expected 0x%08X\n", R, (val & M), (W & M)); \
+ e_err("set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
@@ -1240,8 +1239,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
- DPRINTK(DRV, ERR, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: 0x%08X expected: "
+ "0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -1341,8 +1340,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- DPRINTK(HW, INFO, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", shared_int ?
+ "shared" : "unshared");
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1847,7 +1846,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
- DPRINTK(HW, INFO, "offline testing starting\n");
+ e_info("offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1880,17 +1879,17 @@ static void ixgbe_diag_test(struct net_device *netdev,
else
ixgbe_reset(adapter);
- DPRINTK(HW, INFO, "register testing starting\n");
+ e_info("register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
ixgbe_reset(adapter);
- DPRINTK(HW, INFO, "eeprom testing starting\n");
+ e_info("eeprom testing starting\n");
if (ixgbe_eeprom_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED;
ixgbe_reset(adapter);
- DPRINTK(HW, INFO, "interrupt testing starting\n");
+ e_info("interrupt testing starting\n");
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1898,14 +1897,13 @@ static void ixgbe_diag_test(struct net_device *netdev,
* loopback diagnostic. */
if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED)) {
- DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
- "mode\n");
+ e_info("Skip MAC loopback diagnostic in VT mode\n");
data[3] = 0;
goto skip_loopback;
}
ixgbe_reset(adapter);
- DPRINTK(HW, INFO, "loopback testing starting\n");
+ e_info("loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1916,7 +1914,7 @@ skip_loopback:
if (if_running)
dev_open(netdev);
} else {
- DPRINTK(HW, INFO, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2134,8 +2132,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
if (netdev->features & NETIF_F_LRO) {
netdev->features &= ~NETIF_F_LRO;
- DPRINTK(PROBE, INFO, "rx-usecs set to 0, "
- "disabling LRO/RSC\n");
+ e_info("rx-usecs set to 0, disabling RSC\n");
}
need_reset = true;
}
@@ -2232,7 +2229,7 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
} else if (!adapter->rx_itr_setting) {
netdev->features &= ~ETH_FLAG_LRO;
if (data & ETH_FLAG_LRO)
- DPRINTK(PROBE, INFO, "rx-usecs set to 0, "
+ e_info("rx-usecs set to 0, "
"LRO/RSC cannot be enabled.\n");
}
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 45182ab41d6b..84e1194e0833 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -25,7 +25,6 @@
*******************************************************************************/
-
#include "ixgbe.h"
#ifdef CONFIG_IXGBE_DCB
#include "ixgbe_dcb_82599.h"
@@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
adapter = netdev_priv(netdev);
if (xid >= IXGBE_FCOE_DDP_MAX) {
- DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid);
+ e_warn("xid=0x%x out-of-range\n", xid);
return 0;
}
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
- DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid);
+ e_warn("xid=0x%x no ddp pool for fcoe\n", xid);
return 0;
}
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
- DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
- xid, ddp->sgl, ddp->sgc);
+ e_err("xid 0x%x w/ non-null sgl=%p nents=%d\n",
+ xid, ddp->sgl, ddp->sgc);
return 0;
}
ixgbe_fcoe_clear_ddp(ddp);
@@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* setup dma from scsi command sgl */
dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
- DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid);
+ e_err("xid 0x%x DMA map error\n", xid);
return 0;
}
/* alloc the udl from our ddp pool */
ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
if (!ddp->udl) {
- DPRINTK(DRV, ERR, "failed allocated ddp context\n");
+ e_err("failed allocated ddp context\n");
goto out_noddp_unmap;
}
ddp->sgl = sgl;
@@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
- netif_err(adapter, drv, adapter->netdev,
- "xid=%x:%d,%d,%d:addr=%llx "
- "not enough descriptors\n",
- xid, i, j, dmacount, (u64)addr);
+ e_err("xid=%x:%d,%d,%d:addr=%llx "
+ "not enough descriptors\n",
+ xid, i, j, dmacount, (u64)addr);
goto out_noddp_free;
}
@@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
struct fc_frame_header *fh;
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
- DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
- skb_shinfo(skb)->gso_type);
+ e_err("Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+ skb_shinfo(skb)->gso_type);
return -EINVAL;
}
@@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
break;
default:
- DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof);
+ e_warn("unknown sof = 0x%x\n", sof);
return -EINVAL;
}
@@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
break;
default:
- DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof);
+ e_warn("unknown eof = 0x%x\n", eof);
return -EINVAL;
}
@@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
adapter->pdev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
if (!fcoe->pool)
- DPRINTK(DRV, ERR,
- "failed to allocated FCoE DDP pool\n");
+ e_err("failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
}
@@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
goto out_enable;
- DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n");
+ e_info("Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
@@ -660,7 +657,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
goto out_disable;
- DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n");
+ e_info("Disabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ce30c62a97f7..ebc4b04fdef2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, tx_ring->head),
- IXGBE_READ_REG(hw, tx_ring->tail),
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ e_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, tx_ring->head),
+ IXGBE_READ_REG(hw, tx_ring->tail),
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
return true;
}
@@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (adapter->detect_tx_hung) {
if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
/* schedule immediate reset if we believe we hung */
- DPRINTK(PROBE, INFO,
- "tx hang %d detected, resetting adapter\n",
- adapter->tx_timeout_count + 1);
+ e_info("tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
ixgbe_tx_timeout(adapter->netdev);
}
}
@@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
return;
break;
}
- DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
- "has over heated. Restart the computer. If the problem "
- "persists, power off the system and replace the "
- "adapter\n");
+ e_crit("Network adapter has been stopped because it "
+ "has over heated. Restart the computer. If the problem "
+ "persists, power off the system and replace the "
+ "adapter\n");
/* write to clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
}
@@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
(eicr & IXGBE_EICR_GPI_SDP1)) {
- DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
+ e_crit("Fan has stopped, replace the adapter\n");
/* write to clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
}
@@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
handler, 0, adapter->name[vector],
adapter->q_vector[vector]);
if (err) {
- DPRINTK(PROBE, ERR,
- "request_irq failed for MSIX interrupt "
- "Error: %d\n", err);
+ e_err("request_irq failed for MSIX interrupt: "
+ "Error: %d\n", err);
goto free_queue_irqs;
}
}
@@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
if (err) {
- DPRINTK(PROBE, ERR,
- "request_irq for msix_lsc failed: %d\n", err);
+ e_err("request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
}
@@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
}
if (err)
- DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
+ e_err("request_irq failed, Error %d\n", err);
return err;
}
@@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
map_vector_to_rxq(adapter, 0, 0);
map_vector_to_txq(adapter, 0, 0);
- DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
+ e_info("Legacy interrupt IVAR setup done\n");
}
/**
@@ -2995,6 +2992,48 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int vfn = adapter->num_vfs;
+ unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+ /* return error if we do not support writing to RAR table */
+ if (!hw->mac.ops.set_rar)
+ return -ENOMEM;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
+ vfn, IXGBE_RAH_AV);
+ count++;
+ }
+ }
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--)
+ hw->mac.ops.clear_rar(hw, rar_entries);
+
+ return count;
+}
+
+/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -3007,38 +3046,58 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl;
+ u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ int count;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ /* clear the bits we are changing the status of */
+ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
/* don't hardware filter vlans in promisc mode */
ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
- fctrl &= ~IXGBE_FCTRL_UPE;
- } else if (!hw->addr_ctrl.uc_set_promisc) {
- fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ /*
+ * Write addresses to the MTA; if the attempt fails,
+ * turn on promiscuous mode so that we can at least
+ * receive multicast traffic
+ */
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ vmolr |= IXGBE_VMOLR_ROMPE;
}
ixgbe_vlan_filter_enable(adapter);
hw->addr_ctrl.user_set_promisc = false;
+ /*
+ * Write addresses to the available RAR registers; if there is
+ * not sufficient space to store all the addresses, enable
+ * unicast promiscuous mode
+ */
+ count = ixgbe_write_uc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_UPE;
+ vmolr |= IXGBE_VMOLR_ROPE;
+ }
}
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-
- /* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, netdev);
-
- /* reprogram multicast list */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
-
- if (adapter->num_vfs)
+ if (adapter->num_vfs) {
ixgbe_restore_vf_multicasts(adapter);
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+ ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3257,8 +3316,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
msleep(1);
}
if (k >= IXGBE_MAX_RX_DESC_POLL) {
- DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
- "not set within the polling period\n", rxr);
+ e_err("RXDCTL.ENABLE on Rx queue %d not set within "
+ "the polling period\n", rxr);
}
ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
(adapter->rx_ring[rxr]->count - 1));
@@ -3387,8 +3446,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
} while (--wait_loop &&
!(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
- DPRINTK(DRV, ERR, "Could not enable "
- "Tx Queue %d\n", j);
+ e_err("Could not enable Tx Queue %d\n", j);
}
}
@@ -3436,8 +3494,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
if (esdp & IXGBE_ESDP_SDP1)
- DPRINTK(DRV, CRIT,
- "Fan has stopped, replace the adapter\n");
+ e_crit("Fan has stopped, replace the adapter\n");
}
/*
@@ -3466,7 +3523,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
} else {
err = ixgbe_non_sfp_link_config(hw);
if (err)
- DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
+ e_err("link_config FAILED %d\n", err);
}
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3527,19 +3584,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_SFP_NOT_PRESENT:
break;
case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- dev_err(&adapter->pdev->dev, "master disable timed out\n");
+ e_dev_err("master disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
- dev_warn(&adapter->pdev->dev, "This device is a pre-production "
- "adapter/LOM. Please be aware there may be issues "
- "associated with your hardware. If you are "
- "experiencing problems please contact your Intel or "
- "hardware representative who provided you with this "
- "hardware.\n");
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issuesassociated with "
+ "your hardware. If you are experiencing problems "
+ "please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
break;
default:
- dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+ e_dev_err("Hardware Error: %d\n", err);
}
/* reprogram the RAR[0] in case user changed it. */
@@ -3920,12 +3977,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
+ e_info("FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
+ e_info("FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
@@ -4038,7 +4095,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
*/
- DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI-X interrupts\n");
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -4435,8 +4493,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
} else {
- DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
+ "Unable to allocate MSI interrupt, "
+ "falling back to legacy. Error: %d\n", err);
/* reset err */
err = 0;
}
@@ -4557,27 +4616,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
- DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+ e_dev_err("Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
- "vectors\n");
+ e_dev_err("Unable to allocate memory for queue vectors\n");
goto err_alloc_q_vectors;
}
err = ixgbe_alloc_queues(adapter);
if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ e_dev_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
- DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
- "Tx Queue count = %u\n",
- (adapter->num_rx_queues > 1) ? "Enabled" :
- "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+ e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+ adapter->num_rx_queues, adapter->num_tx_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
@@ -4648,15 +4705,13 @@ static void ixgbe_sfp_task(struct work_struct *work)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to initialize "
- "because an unsupported SFP+ module type "
- "was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ e_dev_err("failed to initialize because an unsupported "
+ "SFP+ module type was detected.\n");
+ e_dev_err("Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
- DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
- hw->phy.sfp_type);
+ e_info("detected SFP+: %d\n", hw->phy.sfp_type);
}
/* don't need this routine any more */
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
@@ -4783,7 +4838,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
- dev_err(&pdev->dev, "EEPROM initialization failed\n");
+ e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -4836,8 +4891,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
- "descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -4859,7 +4913,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
- DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
+ e_err("Allocation for Tx Queue %u failed\n", i);
break;
}
@@ -4884,8 +4938,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
if (!rx_ring->rx_buffer_info)
rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
- DPRINTK(PROBE, ERR,
- "vmalloc allocation failed for the rx desc ring\n");
+ e_err("vmalloc allocation failed for the Rx desc ring\n");
goto alloc_failed;
}
memset(rx_ring->rx_buffer_info, 0, size);
@@ -4898,8 +4951,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- DPRINTK(PROBE, ERR,
- "Memory allocation failed for the rx desc ring\n");
+ e_err("Memory allocation failed for the Rx desc ring\n");
vfree(rx_ring->rx_buffer_info);
goto alloc_failed;
}
@@ -4932,7 +4984,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
- DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
+ e_err("Allocation for Rx Queue %u failed\n", i);
break;
}
@@ -5031,8 +5083,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
return -EINVAL;
- DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
- netdev->mtu, new_mtu);
+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
@@ -5145,8 +5196,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
- "suspend\n");
+ e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -5155,8 +5205,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
err = ixgbe_init_interrupt_scheme(adapter);
if (err) {
- printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
- "device\n");
+ e_dev_err("Cannot initialize interrupts for device\n");
return err;
}
@@ -5516,10 +5565,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to initialize because "
- "an unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a supported "
- "module.\n");
+ e_dev_err("failed to initialize because an unsupported SFP+ "
+ "module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported "
+ "module.\n");
unregister_netdev(adapter->netdev);
return;
}
@@ -5548,8 +5597,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
set_bit(__IXGBE_FDIR_INIT_DONE,
&(adapter->tx_ring[i]->reinit_state));
} else {
- DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters\n");
+ e_err("failed to finish FDIR re-initialization, "
+ "ignored adding FDIR ATR filters\n");
}
/* Done FDIR Re-initialization, enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -5620,16 +5669,14 @@ static void ixgbe_watchdog_task(struct work_struct *work)
flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
}
- printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
- "Flow Control: %s\n",
- netdev->name,
+ e_info("NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
- "10 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" : "unknown speed")),
+ "10 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ "1 Gbps" : "unknown speed")),
((flow_rx && flow_tx) ? "RX/TX" :
- (flow_rx ? "RX" :
- (flow_tx ? "TX" : "None"))));
+ (flow_rx ? "RX" :
+ (flow_tx ? "TX" : "None"))));
netif_carrier_on(netdev);
} else {
@@ -5640,8 +5687,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
adapter->link_up = false;
adapter->link_speed = 0;
if (netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
- netdev->name);
+ e_info("NIC Link is Down\n");
netif_carrier_off(netdev);
}
}
@@ -5817,9 +5863,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit())) {
- DPRINTK(PROBE, WARNING,
- "partial checksum but proto=%x!\n",
- skb->protocol);
+ e_warn("partial checksum but "
+ "proto=%x!\n", skb->protocol);
}
break;
}
@@ -5930,7 +5975,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ e_dev_err("TX DMA map failed\n");
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
@@ -6427,8 +6472,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
- DPRINTK(PROBE, ERR,
- "Failed to enable PCI sriov: %d\n", err);
+ e_err("Failed to enable PCI sriov: %d\n", err);
goto err_novfs;
}
/* If call to enable VFs succeeded then allocate memory
@@ -6452,9 +6496,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
}
/* Oh oh */
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for VF "
- "Data Storage - SRIOV disabled\n");
+ e_err("Unable to allocate memory for VF Data Storage - SRIOV "
+ "disabled\n");
pci_disable_sriov(adapter->pdev);
err_novfs:
@@ -6502,8 +6545,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ e_dev_err("No usable DMA configuration, "
+ "aborting\n");
goto err_dma;
}
}
@@ -6513,8 +6556,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM), ixgbe_driver_name);
if (err) {
- dev_err(&pdev->dev,
- "pci_request_selected_regions failed 0x%x\n", err);
+ e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
goto err_pci_reg;
}
@@ -6625,8 +6667,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
if (esdp & IXGBE_ESDP_SDP1)
- DPRINTK(PROBE, CRIT,
- "Fan has stopped, replace the adapter\n");
+ e_crit("Fan has stopped, replace the adapter\n");
}
/* reset_hw fills in the perm_addr as well */
@@ -6645,19 +6686,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to initialize because "
- "an unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a supported "
- "module.\n");
+ e_dev_err("failed to initialize because an unsupported SFP+ "
+ "module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
- dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
+ e_dev_err("HW Init failed: %d\n", err);
goto err_sw_init;
}
ixgbe_probe_vf(adapter, ii);
- netdev->features = NETIF_F_SG |
+ netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
@@ -6704,7 +6745,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
- dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
+ e_dev_err("The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -6713,7 +6754,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "invalid MAC address\n");
+ e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_eeprom;
}
@@ -6748,7 +6789,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->mac.ops.get_bus_info(hw);
/* print bus type/speed/width info */
- dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
+ e_dev_info("(PCI Express:%s:%s) %pM\n",
((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
@@ -6758,20 +6799,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->dev_addr);
ixgbe_read_pba_num_generic(hw, &part_num);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- (part_num >> 8), (part_num & 0xff));
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
+ "PBA No: %06x-%03x\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ (part_num >> 8), (part_num & 0xff));
else
- dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type,
- (part_num >> 8), (part_num & 0xff));
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
+ hw->mac.type, hw->phy.type,
+ (part_num >> 8), (part_num & 0xff));
if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
- dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
- "this card is not sufficient for optimal "
- "performance.\n");
- dev_warn(&pdev->dev, "For optimal performance a x8 "
- "PCI-Express slot is required.\n");
+ e_dev_warn("PCI-Express bandwidth available for this card is "
+ "not sufficient for optimal performance.\n");
+ e_dev_warn("For optimal performance a x8 PCI-Express slot "
+ "is required.\n");
}
/* save off EEPROM version number */
@@ -6782,12 +6823,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err == IXGBE_ERR_EEPROM_VERSION) {
/* We are running on a pre-production device, log a warning */
- dev_warn(&pdev->dev, "This device is a pre-production "
- "adapter/LOM. Please be aware there may be issues "
- "associated with your hardware. If you are "
- "experiencing problems please contact your Intel or "
- "hardware representative who provided you with this "
- "hardware.\n");
+ e_dev_warn("This device is a pre-production adapter/LOM. "
+ "Please be aware there may be issues associated "
+ "with your hardware. If you are experiencing "
+ "problems please contact your Intel or hardware "
+ "representative who provided you with this "
+ "hardware.\n");
}
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -6810,8 +6851,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
#endif
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
- adapter->num_vfs);
+ e_info("IOV is enabled with %d VFs\n", adapter->num_vfs);
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(pdev, (i | 0x10000000));
}
@@ -6819,7 +6859,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
- dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
+ e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
cards_found++;
return 0;
@@ -6909,7 +6949,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
- DPRINTK(PROBE, INFO, "complete\n");
+ e_dev_info("complete\n");
free_netdev(netdev);
@@ -6959,8 +6999,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
int err;
if (pci_enable_device_mem(pdev)) {
- DPRINTK(PROBE, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ e_err("Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
@@ -6976,8 +7015,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
+ e_dev_err("pci_cleanup_aer_uncorrect_error_status "
+ "failed 0x%0x\n", err);
/* non-fatal, continue */
}
@@ -6998,7 +7037,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
- DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
+ e_info("ixgbe_up failed after reset\n");
return;
}
}
@@ -7034,10 +7073,9 @@ static struct pci_driver ixgbe_driver = {
static int __init ixgbe_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
- ixgbe_driver_string, ixgbe_driver_version);
-
- printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
+ pr_info("%s - version %s\n", ixgbe_driver_string,
+ ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_copyright);
#ifdef CONFIG_IXGBE_DCA
dca_register_notify(&dca_notifier);
@@ -7076,18 +7114,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
}
#endif /* CONFIG_IXGBE_DCA */
-#ifdef DEBUG
+
/**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbe_get_hw_dev - return device
* used by hardware layer to print debugging information
**/
-char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- return adapter->netdev->name;
+ return adapter->netdev;
}
-#endif
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */
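The ixgbe hunks above replace the driver-private DPRINTK() calls with e_info()/e_err()/e_crit()/e_dev_err() style wrappers, and ixgbe_get_hw_dev() now hands the hardware layer a net_device instead of a name string. The wrapper definitions live in a header that is not part of this diff; the following is only a rough sketch, under the assumption that they are thin layers over the generic netdev_<level>() and dev_<level>() helpers and rely on a local "adapter" variable being in scope:

/* Sketch only -- not the actual ixgbe header definitions. */
#define e_info(format, arg...) \
	netdev_info(adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
	netdev_err(adapter->netdev, format, ## arg)
#define e_dev_warn(format, arg...) \
	dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...) \
	dev_err(&adapter->pdev->dev, format, ## arg)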
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index f6cee94ec8e8..6e6dee04ff61 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -25,7 +25,6 @@
*******************************************************************************/
-
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -138,6 +137,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
/* reset offloads to defaults */
if (adapter->vfinfo[vf].pf_vlan) {
@@ -159,26 +159,17 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- if (adapter->vfinfo[vf].rar > 0) {
- adapter->hw.mac.ops.clear_rar(&adapter->hw,
- adapter->vfinfo[vf].rar);
- adapter->vfinfo[vf].rar = -1;
- }
+ hw->mac.ops.clear_rar(hw, rar_entry);
}
int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
struct ixgbe_hw *hw = &adapter->hw;
-
- adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
- vf, IXGBE_RAH_AV);
- if (adapter->vfinfo[vf].rar < 0) {
- DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
- return -1;
- }
+ int rar_entry = hw->mac.num_rar_entries - (vf + 1);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+ hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
return 0;
}
@@ -194,11 +185,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
if (enable) {
random_ether_addr(vf_mac_addr);
- DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
- "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
- vfn,
- vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
- vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ e_info("IOV: VF %d is enabled MAC %pM\n", vfn, vf_mac_addr);
/*
* Store away the VF "permananet" MAC address, it will ask
* for it later.
@@ -243,7 +230,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval)
- printk(KERN_ERR "Error receiving message from VF\n");
+ pr_err("Error receiving message from VF\n");
/* this is a message we already processed, do nothing */
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -257,7 +244,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
if (msgbuf[0] == IXGBE_VF_RESET) {
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u8 *addr = (u8 *)(&msgbuf[1]);
- DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ e_info("VF Reset msg received from vf %d\n", vf);
adapter->vfinfo[vf].clear_to_send = false;
ixgbe_vf_reset_msg(adapter, vf);
adapter->vfinfo[vf].clear_to_send = true;
@@ -310,7 +297,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
break;
default:
- DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ e_err("Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
break;
}
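The VF-enable message above now relies on the %pM printk extension instead of printing six bytes by hand. A minimal standalone illustration (the function name is made up for the example):

#include <linux/kernel.h>
#include <linux/etherdevice.h>

static void example_print_mac(void)
{
	u8 mac[ETH_ALEN];

	random_ether_addr(mac);			/* same helper the patch uses */
	pr_info("generated MAC %pM\n", mac);	/* prints the aa:bb:cc:dd:ee:ff form */
}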
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index a16cff7e54a3..73f1e75f68d4 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -3411,6 +3411,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
+ netdev->features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7805bbf1d53a..62362b4a8c56 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -5718,7 +5718,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
* from the bridge.
*/
if ((hw->features & STP_SUPPORT) && !promiscuous &&
- dev->br_port) {
+ (dev->priv_flags & IFF_BRIDGE_PORT)) {
struct ksz_switch *sw = hw->ksz_switch;
int port = priv->port.first_port;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 72b7949c91b1..09334f8f148b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -60,11 +60,51 @@
#include <net/net_namespace.h>
struct pcpu_lstats {
- unsigned long packets;
- unsigned long bytes;
+ u64 packets;
+ u64 bytes;
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ seqcount_t seq;
+#endif
unsigned long drops;
};
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+static inline void lstats_update_begin(struct pcpu_lstats *lstats)
+{
+ write_seqcount_begin(&lstats->seq);
+}
+static inline void lstats_update_end(struct pcpu_lstats *lstats)
+{
+ write_seqcount_end(&lstats->seq);
+}
+static inline void lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+ u64 tpackets, tbytes;
+ unsigned int seq;
+
+ do {
+ seq = read_seqcount_begin(&lstats->seq);
+ tpackets = lstats->packets;
+ tbytes = lstats->bytes;
+ } while (read_seqcount_retry(&lstats->seq, seq));
+
+ *packets += tpackets;
+ *bytes += tbytes;
+}
+#else
+static inline void lstats_update_begin(struct pcpu_lstats *lstats)
+{
+}
+static inline void lstats_update_end(struct pcpu_lstats *lstats)
+{
+}
+static inline void lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
+{
+ *packets += lstats->packets;
+ *bytes += lstats->bytes;
+}
+#endif
+
/*
* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
@@ -86,21 +126,23 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
+ lstats_update_begin(lb_stats);
lb_stats->bytes += len;
lb_stats->packets++;
+ lstats_update_end(lb_stats);
} else
lb_stats->drops++;
return NETDEV_TX_OK;
}
-static struct net_device_stats *loopback_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
{
const struct pcpu_lstats __percpu *pcpu_lstats;
- struct net_device_stats *stats = &dev->stats;
- unsigned long bytes = 0;
- unsigned long packets = 0;
- unsigned long drops = 0;
+ struct rtnl_link_stats64 *stats = &dev->stats64;
+ u64 bytes = 0;
+ u64 packets = 0;
+ u64 drops = 0;
int i;
pcpu_lstats = (void __percpu __force *)dev->ml_priv;
@@ -108,8 +150,7 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
const struct pcpu_lstats *lb_stats;
lb_stats = per_cpu_ptr(pcpu_lstats, i);
- bytes += lb_stats->bytes;
- packets += lb_stats->packets;
+ lstats_fetch_and_add(&packets, &bytes, lb_stats);
drops += lb_stats->drops;
}
stats->rx_packets = packets;
@@ -158,7 +199,7 @@ static void loopback_dev_free(struct net_device *dev)
static const struct net_device_ops loopback_ops = {
.ndo_init = loopback_dev_init,
.ndo_start_xmit= loopback_xmit,
- .ndo_get_stats = loopback_get_stats,
+ .ndo_get_stats64 = loopback_get_stats64,
};
/*
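The loopback changes above exist because a 64-bit byte/packet counter cannot be read atomically on a 32-bit machine, so on 32-bit SMP the writer brackets its update with a seqcount and readers retry if they overlapped a write. A minimal sketch of that pattern with hypothetical names (the real driver keeps one such structure per CPU):

#include <linux/seqlock.h>
#include <linux/types.h>

struct example_stats {
	u64		bytes;
	seqcount_t	seq;
};

/* writer: each CPU updates only its own counters, so writers never race each other */
static void example_add_bytes(struct example_stats *s, unsigned int len)
{
	write_seqcount_begin(&s->seq);
	s->bytes += len;
	write_seqcount_end(&s->seq);
}

/* reader: retries until it sees a consistent 64-bit value */
static u64 example_read_bytes(const struct example_stats *s)
{
	unsigned int start;
	u64 bytes;

	do {
		start = read_seqcount_begin(&s->seq);
		bytes = s->bytes;
	} while (read_seqcount_retry(&s->seq, start));

	return bytes;
}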
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 1136c9a22b67..3832fa4961dd 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count,
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
+#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
+
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-static void word_memcpy_tocard(void *tp, const void *fp, int count);
-static void word_memcpy_fromcard(void *tp, const void *fp, int count);
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
@@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy(membase, &outdata, 4);
+ memcpy_toio(membase, &outdata, 4);
/* Now compare them */
- if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
+ if (memcmp_withio(&outdata, membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
@@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev,
case MAC8390_APPLE:
switch (mac8390_testio(dev->mem_start)) {
case ACCESS_UNKNOWN:
- pr_info("Don't know how to access card memory!\n");
+ pr_err("Don't know how to access card memory!\n");
return -ENODEV;
break;
@@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev,
static int mac8390_open(struct net_device *dev)
{
+ int err;
+
__ei_open(dev);
- if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- return 0;
+ err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
+ if (err)
+ pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return err;
}
static int mac8390_close(struct net_device *dev)
@@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4);
+ memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
/* Fix endianness */
hdr->count = swab16(hdr->count);
}
@@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ memcpy_fromio(skb->data, dev->mem_start + xfer_base,
semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count,
- (char *)ei_status.rmem_start, count);
- } else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
count);
+ } else {
+ memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
}
}
@@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count,
{
long shmem = (start_page - WD_START_PG)<<8;
- memcpy_toio((char *)dev->mem_start + shmem, buf, count);
+ memcpy_toio(dev->mem_start + shmem, buf, count);
}
/* dayna block input/output */
@@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev,
int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
+ word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
@@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data,
- (char *)dev->mem_start + xfer_base,
+ word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
- (char *)ei_status.rmem_start, count);
+ ei_status.rmem_start, count);
} else {
- word_memcpy_fromcard(skb->data,
- (char *)dev->mem_start + xfer_base, count);
+ word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
+ count);
}
}
@@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count,
{
long shmem = (start_page - WD_START_PG)<<8;
- word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count);
+ word_memcpy_tocard(dev->mem_start + shmem, buf, count);
}
-static void word_memcpy_tocard(void *tp, const void *fp, int count)
+static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
{
- volatile unsigned short *to = tp;
+ volatile unsigned short *to = (void *)tp;
const unsigned short *from = fp;
count++;
@@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
*to++ = *from++;
}
-static void word_memcpy_fromcard(void *tp, const void *fp, int count)
+static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
{
unsigned short *to = tp;
- const volatile unsigned short *from = fp;
+ const volatile unsigned short *from = (const void *)fp;
count++;
count /= 2;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 87e8d4cb4057..e096875aa055 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -37,8 +37,14 @@ struct macvlan_port {
struct net_device *dev;
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
+ struct rcu_head rcu;
};
+#define macvlan_port_get_rcu(dev) \
+ ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
+#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
+#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
+
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -145,15 +151,16 @@ static void macvlan_broadcast(struct sk_buff *skb,
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
- struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
{
+ struct macvlan_port *port;
const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
struct net_device *dev;
unsigned int len;
+ port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
@@ -515,6 +522,7 @@ static int macvlan_port_create(struct net_device *dev)
{
struct macvlan_port *port;
unsigned int i;
+ int err;
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
return -EINVAL;
@@ -527,19 +535,32 @@ static int macvlan_port_create(struct net_device *dev)
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
- rcu_assign_pointer(dev->macvlan_port, port);
- return 0;
+
+ err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
+ if (err)
+ kfree(port);
+ else
+ dev->priv_flags |= IFF_MACVLAN_PORT;
+ return err;
}
-static void macvlan_port_destroy(struct net_device *dev)
+static void macvlan_port_rcu_free(struct rcu_head *head)
{
- struct macvlan_port *port = dev->macvlan_port;
+ struct macvlan_port *port;
- rcu_assign_pointer(dev->macvlan_port, NULL);
- synchronize_rcu();
+ port = container_of(head, struct macvlan_port, rcu);
kfree(port);
}
+static void macvlan_port_destroy(struct net_device *dev)
+{
+ struct macvlan_port *port = macvlan_port_get(dev);
+
+ dev->priv_flags &= ~IFF_MACVLAN_PORT;
+ netdev_rx_handler_unregister(dev);
+ call_rcu(&port->rcu, macvlan_port_rcu_free);
+}
+
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
@@ -615,12 +636,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_ADDRESS])
random_ether_addr(dev->dev_addr);
- if (lowerdev->macvlan_port == NULL) {
+ if (!macvlan_port_exists(lowerdev)) {
err = macvlan_port_create(lowerdev);
if (err < 0)
return err;
}
- port = lowerdev->macvlan_port;
+ port = macvlan_port_get(lowerdev);
vlan->lowerdev = lowerdev;
vlan->dev = dev;
@@ -730,10 +751,11 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev *vlan, *next;
struct macvlan_port *port;
- port = dev->macvlan_port;
- if (port == NULL)
+ if (!macvlan_port_exists(dev))
return NOTIFY_DONE;
+ port = macvlan_port_get(dev);
+
switch (event) {
case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list)
@@ -767,14 +789,12 @@ static int __init macvlan_init_module(void)
int err;
register_netdevice_notifier(&macvlan_notifier_block);
- macvlan_handle_frame_hook = macvlan_handle_frame;
err = macvlan_link_register(&macvlan_link_ops);
if (err < 0)
goto err1;
return 0;
err1:
- macvlan_handle_frame_hook = NULL;
unregister_netdevice_notifier(&macvlan_notifier_block);
return err;
}
@@ -782,7 +802,6 @@ err1:
static void __exit macvlan_cleanup_module(void)
{
rtnl_link_unregister(&macvlan_link_ops);
- macvlan_handle_frame_hook = NULL;
unregister_netdevice_notifier(&macvlan_notifier_block);
}
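The macvlan conversion above drops the dedicated dev->macvlan_port pointer and the global macvlan_handle_frame_hook in favour of the generic rx_handler attached to the lower device, with the port structure freed through call_rcu() so in-flight receivers finish first. A rough sketch of that lifetime pattern with made-up names (not macvlan's actual code):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_port {
	struct net_device	*dev;
	struct rcu_head		rcu;
};

/* runs under rcu_read_lock() from netif_receive_skb() */
static struct sk_buff *example_handle_frame(struct sk_buff *skb)
{
	struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

	(void)port;		/* demultiplex based on port state here */
	return skb;		/* hand the frame back unchanged */
}

static int example_port_attach(struct net_device *dev)
{
	struct example_port *port;
	int err;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	port->dev = dev;

	err = netdev_rx_handler_register(dev, example_handle_frame, port);
	if (err)
		kfree(port);
	return err;
}

static void example_port_free(struct rcu_head *head)
{
	kfree(container_of(head, struct example_port, rcu));
}

static void example_port_detach(struct net_device *dev)
{
	struct example_port *port = dev->rx_handler_data;

	netdev_rx_handler_unregister(dev);
	call_rcu(&port->rcu, example_port_free);	/* defer kfree past readers */
}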
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96180c0ec206..a0d8a26f5a02 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -961,6 +961,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+ dev->dev_id = port - 1;
/*
* Initialize driver private data
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 423053482ed5..22d0b3b796b4 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -110,7 +110,7 @@ struct mlx4_eqe {
u32 raw[6];
struct {
__be32 cqn;
- } __attribute__((packed)) comp;
+ } __packed comp;
struct {
u16 reserved1;
__be16 token;
@@ -118,27 +118,27 @@ struct mlx4_eqe {
u8 reserved3[3];
u8 status;
__be64 out_param;
- } __attribute__((packed)) cmd;
+ } __packed cmd;
struct {
__be32 qpn;
- } __attribute__((packed)) qp;
+ } __packed qp;
struct {
__be32 srqn;
- } __attribute__((packed)) srq;
+ } __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
- } __attribute__((packed)) cq_err;
+ } __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
- } __attribute__((packed)) port_change;
+ } __packed port_change;
} event;
u8 reserved3[3];
u8 owner;
-} __attribute__((packed));
+} __packed;
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3dc69be4949f..9c188bdd7f4f 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -58,7 +58,7 @@ struct mlx4_mpt_entry {
__be32 mtt_sz;
__be32 entity_size;
__be32 first_byte_offset;
-} __attribute__((packed));
+} __packed;
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
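The mlx4 (and, further down, ps3_gelic) hunks swap the open-coded GCC attribute for the kernel's __packed shorthand from linux/compiler-gcc.h; the two spellings are equivalent and produce the same layout. A tiny sketch:

#include <linux/compiler.h>
#include <linux/types.h>

/* both structs have sizeof == 6: no padding is inserted around 'b' */
struct old_style {
	u8	a;
	__be32	b;
	u8	c;
} __attribute__((packed));

struct new_style {
	u8	a;
	__be32	b;
	u8	c;
} __packed;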
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 1b2c29150202..e7b4187da057 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -69,7 +69,6 @@
#define MPHDRLEN 6 /* multilink protocol header length */
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
-#define MIN_FRAG_SIZE 64
/*
* An instance of /dev/ppp can be associated with either a ppp
@@ -539,14 +538,9 @@ static int get_filter(void __user *arg, struct sock_filter **p)
}
len = uprog.len * sizeof(struct sock_filter);
- code = kmalloc(len, GFP_KERNEL);
- if (code == NULL)
- return -ENOMEM;
-
- if (copy_from_user(code, uprog.filter, len)) {
- kfree(code);
- return -EFAULT;
- }
+ code = memdup_user(uprog.filter, len);
+ if (IS_ERR(code))
+ return PTR_ERR(code);
err = sk_chk_filter(code, uprog.len);
if (err) {
@@ -1933,9 +1927,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
/* If the queue is getting long, don't wait any longer for packets
before the start of the queue. */
if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
- struct sk_buff *skb = skb_peek(&ppp->mrq);
- if (seq_before(ppp->minseq, skb->sequence))
- ppp->minseq = skb->sequence;
+ struct sk_buff *mskb = skb_peek(&ppp->mrq);
+ if (seq_before(ppp->minseq, mskb->sequence))
+ ppp->minseq = mskb->sequence;
}
/* Pull completed packets off the queue and receive them. */
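get_filter() above replaces an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which returns either the freshly allocated kernel copy or an ERR_PTR-encoded error. A minimal sketch of the calling convention (helper name invented for the example):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *example_copy_blob(const void __user *ubuf, size_t len)
{
	void *buf;

	buf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() in one call */
	if (IS_ERR(buf))
		return buf;		/* caller distinguishes with IS_ERR()/PTR_ERR() */

	/* ... use buf, then kfree(buf) when finished ... */
	return buf;
}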
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 805b64d1e893..344ef330e123 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
-static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
@@ -949,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
abort:
kfree_skb(skb);
- return 1;
+ return 0;
}
/************************************************************************
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 0a88b535197a..f7e51b7d7049 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg {
u16 bss_type; /* infra or adhoc */
u16 auth_method; /* shared key or open */
u16 op_mode; /* B/G */
-} __attribute__((packed));
+} __packed;
/* for GELIC_EURUS_CMD_WEP_CFG */
@@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg {
/* all fields are big endian */
u16 security;
u8 key[4][16];
-} __attribute__((packed));
+} __packed;
/* for GELIC_EURUS_CMD_WPA_CFG */
enum gelic_eurus_wpa_security {
@@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg {
u16 security;
u16 psk_type; /* psk key encoding type */
u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
-} __attribute__((packed));
+} __packed;
/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
enum gelic_eurus_scan_capability {
@@ -171,7 +171,7 @@ struct gelic_eurus_scan_info {
__be32 reserved3;
__be32 reserved4;
u8 elements[0]; /* ie */
-} __attribute__ ((packed));
+} __packed;
/* the hypervisor returns bbs up to 16 */
#define GELIC_EURUS_MAX_SCAN (16)
@@ -193,7 +193,7 @@ struct gelic_wl_scan_info {
struct gelic_eurus_rssi_info {
/* big endian */
__be16 rssi;
-} __attribute__ ((packed));
+} __packed;
/* for 'stat' member of gelic_wl_info */
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 896d40df9a13..3675678bbf01 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 2
-#define QLCNIC_LINUX_VERSIONID "5.0.2"
+#define _QLCNIC_LINUX_SUBVERSION 6
+#define QLCNIC_LINUX_VERSIONID "5.0.6"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
@@ -68,6 +68,7 @@
#define QLCNIC_DECODE_VERSION(v) \
QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
+#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
#define QLCNIC_NUM_FLASH_SECTORS (64)
#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
@@ -112,8 +113,10 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
-#define MAX_BUFFERS_PER_CMD 32
-#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
/*
@@ -197,8 +200,7 @@ struct cmd_desc_type0 {
__le64 addr_buffer4;
- __le32 reserved2;
- __le16 reserved;
+ u8 eth_addr[ETH_ALEN];
__le16 vlan_TCI;
} __attribute__ ((aligned(64)));
@@ -315,6 +317,8 @@ struct uni_data_desc{
#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+#define QLCNIC_MSIX_TABLE_OFFSET 0x44
+
/* Flash memory map */
#define QLCNIC_BRDCFG_START 0x4000 /* board config */
#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
@@ -367,7 +371,7 @@ struct qlcnic_recv_crb {
*/
struct qlcnic_cmd_buffer {
struct sk_buff *skb;
- struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
@@ -377,7 +381,6 @@ struct qlcnic_rx_buffer {
struct sk_buff *skb;
u64 dma;
u16 ref_handle;
- u16 state;
};
/* Board types */
@@ -419,7 +422,6 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
- u64 null_skb;
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
@@ -542,7 +544,17 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
-#define QLCNIC_CDRP_CMD_MAX 0x0000001f
+#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
+
+#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
+#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
+#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
+#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
+#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
+#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
+#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
+#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_TIMEOUT 17
@@ -556,12 +568,12 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LSO (1 << 6)
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF (1 << 11)
/*
* Context state
*/
-#define QLCHAL_VERSION 1
-
+#define QLCNIC_HOST_CTX_STATE_FREED 0
#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
/*
@@ -592,9 +604,10 @@ struct qlcnic_hostrq_rx_ctx {
__le32 sds_ring_offset; /* Offset to SDS config */
__le16 num_rds_rings; /* Count of RDS rings */
__le16 num_sds_rings; /* Count of SDS rings */
- __le16 rsvd1; /* Padding */
- __le16 rsvd2; /* Padding */
- u8 reserved[128]; /* reserve space for future expansion*/
+ __le16 valid_field_offset;
+ u8 txrx_sds_binding;
+ u8 msix_handler;
+ u8 reserved[128]; /* reserve space for future expansion */
/* MUST BE 64-bit aligned.
The following is packed:
- N hostrq_rds_rings
@@ -881,12 +894,14 @@ struct qlcnic_mac_req {
#define QLCNIC_LRO_ENABLED 0x08
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_ESWITCH_ENABLED 0x40
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+#define QLCNIC_MSIX_TBL_PGSIZE 4096
#define QLCNIC_NETDEV_WEIGHT 128
#define QLCNIC_ADAPTER_UP_MAGIC 777
@@ -919,11 +934,11 @@ struct qlcnic_adapter {
u8 rx_csum;
u8 portnum;
u8 physical_port;
+ u8 reset_context;
u8 mc_enabled;
u8 max_mc_count;
u8 rss_supported;
- u8 rsrvd1;
u8 fw_wait_cnt;
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
@@ -940,6 +955,15 @@ struct qlcnic_adapter {
u16 link_autoneg;
u16 module_type;
+ u16 op_mode;
+ u16 switch_mode;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 min_tx_bw;
+ u16 max_tx_bw;
+ u16 max_mtu;
+
+ u32 fw_hal_version;
u32 capabilities;
u32 flags;
u32 irq;
@@ -948,18 +972,22 @@ struct qlcnic_adapter {
u32 int_vec_bit;
u32 heartbit;
+ u8 max_mac_filters;
u8 dev_state;
u8 diag_test;
u8 diag_cnt;
u8 reset_ack_timeo;
u8 dev_init_timeo;
- u8 rsrd1;
u16 msg_enable;
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
+ struct qlcnic_pci_info *npars;
+ struct qlcnic_eswitch *eswitch;
+ struct qlcnic_nic_template *nic_ops;
+
struct qlcnic_adapter_stats stats;
struct qlcnic_recv_context recv_ctx;
@@ -974,8 +1002,6 @@ struct qlcnic_adapter {
struct delayed_work fw_work;
- struct work_struct tx_timeout_task;
-
struct qlcnic_nic_intr_coalesce coal;
unsigned long state;
@@ -984,6 +1010,53 @@ struct qlcnic_adapter {
const struct firmware *fw;
};
+struct qlcnic_info {
+ __le16 pci_func;
+ __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+ __le16 phys_port;
+ __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+ __le32 capabilities;
+ u8 max_mac_filters;
+ u8 reserved1;
+ __le16 max_mtu;
+
+ __le16 max_tx_ques;
+ __le16 max_rx_ques;
+ __le16 min_tx_bw;
+ __le16 max_tx_bw;
+ u8 reserved2[104];
+};
+
+struct qlcnic_pci_info {
+ __le16 id; /* pci function id */
+ __le16 active; /* 1 = Enabled */
+ __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+ __le16 default_port; /* default port number */
+
+ __le16 tx_min_bw; /* Multiple of 100mbps */
+ __le16 tx_max_bw;
+ __le16 reserved1[2];
+
+ u8 mac[ETH_ALEN];
+ u8 reserved2[106];
+};
+
+struct qlcnic_eswitch {
+ u8 port;
+ u8 active_vports;
+ u8 active_vlans;
+ u8 active_ucast_filters;
+ u8 max_ucast_filters;
+ u8 max_active_vlans;
+
+ u32 flags;
+#define QLCNIC_SWITCH_ENABLE BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
+};
+
int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
@@ -1031,13 +1104,13 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
/* Functions from qlcnic_init.c */
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1050,6 +1123,10 @@ void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
@@ -1070,13 +1147,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1088,6 +1166,25 @@ int qlcnic_check_loopback_buff(unsigned char *data);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+/* Management functions */
+int qlcnic_set_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_get_nic_info(struct qlcnic_adapter *, u8);
+int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_get_pci_info(struct qlcnic_adapter *);
+int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
+
+/* eSwitch management functions */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
+ struct qlcnic_eswitch *);
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
+ struct qlcnic_eswitch *);
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
+ u8, u8, u16);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+extern int qlcnic_config_tso;
+
/*
* QLOGIC Board information
*/
@@ -1131,6 +1228,15 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
+struct qlcnic_nic_template {
+ int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*set_ilb_mode) (struct qlcnic_adapter *);
+ void (*clear_ilb_mode) (struct qlcnic_adapter *);
+ int (*start_firmware) (struct qlcnic_adapter *);
+};
+
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
if (NETIF_MSG_##lvl & adapter->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
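The qlcnic.h additions above introduce a struct qlcnic_nic_template of function pointers plus an adapter->nic_ops member, so common code can stay mode-agnostic and dispatch through the table. A short illustration of that dispatch (the wrapper name is hypothetical; it assumes the declarations added above):

static int example_read_mac(struct qlcnic_adapter *adapter, u8 *mac)
{
	/* nic_ops points at the template selected for this adapter's mode */
	return adapter->nic_ops->get_mac_addr(adapter, mac);
}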
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index c2c1f5cc16c6..941cd0873f87 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
if (qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
- QLCHAL_VERSION,
- recv_ctx->context_id,
- mtu,
- 0,
- QLCNIC_CDRP_CMD_SET_MTU)) {
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ QLCNIC_CDRP_CMD_SET_MTU)) {
dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
return -EIO;
@@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
int i, nrds_rings, nsds_rings;
size_t rq_size, rsp_size;
- u32 cap, reg, val;
+ u32 cap, reg, val, reg2;
int err;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -152,9 +152,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
- cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+ | QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
+ msix_handler);
+ prq->txrx_sds_binding = nsds_rings - 1;
+
prq->capabilities[0] = cpu_to_le32(cap);
prq->host_int_crb_mode =
cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
@@ -175,6 +180,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < nrds_rings; i++) {
rds_ring = &recv_ctx->rds_rings[i];
+ rds_ring->producer = 0;
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
@@ -188,6 +194,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < nsds_rings; i++) {
sds_ring = &recv_ctx->sds_rings[i];
+ sds_ring->consumer = 0;
+ memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
@@ -197,7 +205,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
phys_addr = hostrq_phys_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
(u32)(phys_addr >> 32),
(u32)(phys_addr & 0xffffffff),
rq_size,
@@ -216,8 +224,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[i];
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+ if (adapter->fw_hal_version == QLCNIC_FW_BASE)
+ rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
QLCNIC_REG(reg - 0x200));
+ else
+ rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 +
+ reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -227,12 +239,18 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[i];
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
- QLCNIC_REG(reg - 0x200));
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
+ sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
QLCNIC_REG(reg - 0x200));
+ sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg2 - 0x200));
+ } else {
+ sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 +
+ reg;
+ sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
+ }
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -253,7 +271,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
if (qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
recv_ctx->context_id,
QLCNIC_DESTROY_CTX_RESET,
0,
@@ -262,6 +280,8 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
}
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}
static int
@@ -278,6 +298,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
dma_addr_t rq_phys_addr, rsp_phys_addr;
struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ /* reset host resources */
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+ *(tx_ring->hw_consumer) = 0;
+
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = pci_alloc_consistent(adapter->pdev,
rq_size, &rq_phys_addr);
@@ -319,7 +344,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
phys_addr = rq_phys_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
(u32)(phys_addr >> 32),
((u32)phys_addr & 0xffffffff),
rq_size,
@@ -327,8 +352,12 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
- tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+ if (adapter->fw_hal_version == QLCNIC_FW_BASE)
+ tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
QLCNIC_REG(temp - 0x200));
+ else
+ tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 +
+ temp;
adapter->tx_context_id =
le16_to_cpu(prsp->context_id);
@@ -351,7 +380,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
if (qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
adapter->tx_context_id,
QLCNIC_DESTROY_CTX_RESET,
0,
@@ -368,7 +397,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
if (qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
reg,
0,
0,
@@ -385,7 +414,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
{
return qlcnic_issue_cmd(adapter,
adapter->ahw.pci_func,
- QLCHAL_VERSION,
+ adapter->fw_hal_version,
reg,
val,
0,
@@ -457,15 +486,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
sds_ring->desc_head = (struct status_desc *)addr;
}
-
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
- if (err)
- goto err_out_free;
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err)
- goto err_out_free;
-
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
return 0;
err_out_free:
@@ -473,15 +493,27 @@ err_out_free:
return err;
}
-void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
+ int err;
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ return err;
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ return err;
+ }
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_destroy_rx_ctx(adapter);
qlcnic_fw_cmd_destroy_tx_ctx(adapter);
@@ -489,6 +521,15 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
/* Allow dma queues to drain after context reset */
msleep(20);
}
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
recv_ctx = &adapter->recv_ctx;
@@ -533,3 +574,468 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
+/* Set MAC address of a NIC partition */
+int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
+{
+ int err = 0;
+ u32 arg1, arg2, arg3;
+
+ arg1 = adapter->ahw.pci_func | BIT_9;
+ arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ arg3 = mac[4] | (mac[5] << 16);
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ arg2,
+ arg3,
+ QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set mac address%d\n", err);
+ err = -EIO;
+ }
+
+ return err;
+}
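As a quick illustration of the argument packing above (a standalone user-space sketch, independent of the firmware interface itself): the first four MAC bytes fill the second mailbox argument from the least significant byte up, while arg1 carries the PCI function number with BIT_9 marking a set operation. The helper name below is made up for the example.

#include <assert.h>
#include <stdint.h>

/* Pack the first four MAC bytes the way the qlcnic_set_mac_address()
 * hunk above builds arg2. */
static uint32_t pack_mac_low(const uint8_t *mac)
{
	return mac[0] | (mac[1] << 8) | (mac[2] << 16) |
	       ((uint32_t)mac[3] << 24);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
	uint32_t arg2 = pack_mac_low(mac);

	/* Byte 0 of the MAC lands in the least significant byte. */
	assert((arg2 & 0xff) == 0x00);
	assert(((arg2 >> 24) & 0xff) == 0xaa);
	return 0;
}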
+
+/* Get MAC address of a NIC partition */
+int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+ int err;
+ u32 arg1;
+
+ arg1 = adapter->ahw.pci_func | BIT_8;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_MAC_ADDRESS);
+
+ if (err == QLCNIC_RCODE_SUCCESS)
+ qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
+ QLCNIC_ARG2_CRB_OFFSET, 0, mac);
+ else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get mac address%d\n", err);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
+{
+ int err;
+ dma_addr_t nic_dma_t;
+ struct qlcnic_info *nic_info;
+ void *nic_info_addr;
+ size_t nic_size = sizeof(struct qlcnic_info);
+
+ nic_info_addr = pci_alloc_consistent(adapter->pdev,
+ nic_size, &nic_dma_t);
+ if (!nic_info_addr)
+ return -ENOMEM;
+ memset(nic_info_addr, 0, nic_size);
+
+ nic_info = (struct qlcnic_info *) nic_info_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ MSD(nic_dma_t),
+ LSD(nic_dma_t),
+ (func_id << 16 | nic_size),
+ QLCNIC_CDRP_CMD_GET_NIC_INFO);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ adapter->physical_port = le16_to_cpu(nic_info->phys_port);
+ adapter->switch_mode = le16_to_cpu(nic_info->switch_mode);
+ adapter->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+ adapter->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+ adapter->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ adapter->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+ adapter->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ adapter->capabilities = le32_to_cpu(nic_info->capabilities);
+ adapter->max_mac_filters = nic_info->max_mac_filters;
+
+ if (adapter->capabilities & BIT_6)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ dev_info(&adapter->pdev->dev,
+ "phy port: %d switch_mode: %d,\n"
+ "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
+ "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
+ adapter->physical_port, adapter->switch_mode,
+ adapter->max_tx_ques, adapter->max_rx_ques,
+ adapter->min_tx_bw, adapter->max_tx_bw,
+ adapter->max_mtu, adapter->capabilities);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+ return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+{
+ int err = -EIO;
+ u32 func_state;
+ dma_addr_t nic_dma_t;
+ void *nic_info_addr;
+ struct qlcnic_info *nic_info;
+ size_t nic_size = sizeof(struct qlcnic_info);
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ if (qlcnic_api_lock(adapter))
+ return err;
+
+ func_state = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ if (QLC_DEV_CHECK_ACTIVE(func_state, nic->pci_func)) {
+ qlcnic_api_unlock(adapter);
+ return err;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
+ &nic_dma_t);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ memset(nic_info_addr, 0, nic_size);
+ nic_info = (struct qlcnic_info *)nic_info_addr;
+
+ nic_info->pci_func = cpu_to_le16(nic->pci_func);
+ nic_info->op_mode = cpu_to_le16(nic->op_mode);
+ nic_info->phys_port = cpu_to_le16(nic->phys_port);
+ nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+ nic_info->capabilities = cpu_to_le32(nic->capabilities);
+ nic_info->max_mac_filters = nic->max_mac_filters;
+ nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+ nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+ nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+ nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ MSD(nic_dma_t),
+ LSD(nic_dma_t),
+ nic_size,
+ QLCNIC_CDRP_CMD_SET_NIC_INFO);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set nic info%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+ return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
+{
+ int err = 0, i;
+ dma_addr_t pci_info_dma_t;
+ struct qlcnic_pci_info *npar;
+ void *pci_info_addr;
+ size_t npar_size = sizeof(struct qlcnic_pci_info);
+ size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+
+ pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
+ &pci_info_dma_t);
+ if (!pci_info_addr)
+ return -ENOMEM;
+ memset(pci_info_addr, 0, pci_size);
+
+ if (!adapter->npars)
+ adapter->npars = kzalloc(pci_size, GFP_KERNEL);
+ if (!adapter->npars) {
+ err = -ENOMEM;
+ goto err_npar;
+ }
+
+ if (!adapter->eswitch)
+ adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+ QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
+ if (!adapter->eswitch) {
+ err = -ENOMEM;
+ goto err_eswitch;
+ }
+
+ npar = (struct qlcnic_pci_info *) pci_info_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ MSD(pci_info_dma_t),
+ LSD(pci_info_dma_t),
+ pci_size,
+ QLCNIC_CDRP_CMD_GET_PCI_INFO);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++) {
+ adapter->npars[i].id = le32_to_cpu(npar->id);
+ adapter->npars[i].active = le32_to_cpu(npar->active);
+ adapter->npars[i].type = le32_to_cpu(npar->type);
+ adapter->npars[i].default_port =
+ le32_to_cpu(npar->default_port);
+ adapter->npars[i].tx_min_bw =
+ le32_to_cpu(npar->tx_min_bw);
+ adapter->npars[i].tx_max_bw =
+ le32_to_cpu(npar->tx_max_bw);
+ memcpy(adapter->npars[i].mac, npar->mac, ETH_ALEN);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PCI Info%d\n", err);
+ kfree(adapter->npars);
+ err = -EIO;
+ }
+ goto err_npar;
+
+err_eswitch:
+ kfree(adapter->npars);
+ adapter->npars = NULL;
+
+err_npar:
+ pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
+ pci_info_dma_t);
+ return err;
+}
+
+/* Reset a NIC partition */
+
+int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
+{
+ int err = -EIO;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ func_no,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_RESET_NPAR);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to issue reset partition%d\n", err);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+/* Get eSwitch Capabilities */
+int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
+ struct qlcnic_eswitch *eswitch)
+{
+ int err = -EIO;
+ u32 arg1, arg2;
+
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ port,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+ eswitch->port = arg1 & 0xf;
+ eswitch->active_vports = LSB(arg2);
+ eswitch->max_ucast_filters = MSB(arg2);
+ eswitch->max_active_vlans = LSB(MSW(arg2));
+ if (arg1 & BIT_6)
+ eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+ if (arg1 & BIT_7)
+ eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
+ if (arg1 & BIT_8)
+ eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get eswitch capabilities%d\n", err);
+ }
+
+ return err;
+}
+
+/* Get current status of eswitch */
+int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
+ struct qlcnic_eswitch *eswitch)
+{
+ int err = -EIO;
+ u32 arg1, arg2;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ port,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+
+ eswitch->port = arg1 & 0xf;
+ eswitch->active_vports = LSB(arg2);
+ eswitch->active_ucast_filters = MSB(arg2);
+ eswitch->active_vlans = LSB(MSW(arg2));
+ if (arg1 & BIT_6)
+ eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
+ if (arg1 & BIT_8)
+ eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
+
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get eswitch status%d\n", err);
+ }
+
+ return err;
+}
+
+/* Enable/Disable eSwitch */
+int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
+{
+ int err = -EIO;
+ u32 arg1, arg2;
+ struct qlcnic_eswitch *eswitch;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ eswitch = &adapter->eswitch[id];
+ if (!eswitch)
+ return err;
+
+ arg1 = eswitch->port | (enable ? BIT_4 : 0);
+ arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
+ (eswitch->max_active_vlans << 16);
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ arg2,
+ 0,
+ QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to enable eswitch%d\n", eswitch->port);
+ eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+ err = -EIO;
+ } else {
+ eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+ dev_info(&adapter->pdev->dev,
+ "Enabled eSwitch for port %d\n", eswitch->port);
+ }
+
+ return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+ u8 enable_mirroring, u8 pci_func)
+{
+ int err = -EIO;
+ u32 arg1;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ return err;
+
+ arg1 = id | (enable_mirroring ? BIT_4 : 0);
+ arg1 |= pci_func << 8;
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure port mirroring%d on eswitch:%d\n",
+ pci_func, id);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Configured eSwitch %d for port mirroring:%d\n",
+ id, pci_func);
+ }
+
+ return err;
+}
+
+/* Configure eSwitch port */
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
+ int vlan_tagging, u8 discard_tagged, u8 promisc_mode,
+ u8 mac_learn, u8 pci_func, u16 vlan_id)
+{
+ int err = -EIO;
+ u32 arg1;
+ struct qlcnic_eswitch *eswitch;
+
+ if (adapter->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ eswitch = &adapter->eswitch[id];
+ if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
+ return err;
+
+ arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
+ arg1 |= (promisc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
+ arg1 |= pci_func << 8;
+ if (vlan_tagging)
+ arg1 |= BIT_5 | (vlan_id << 16);
+
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ adapter->fw_hal_version,
+ arg1,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure eswitch port%d\n", eswitch->port);
+ eswitch->flags |= QLCNIC_SWITCH_ENABLE;
+ } else {
+ eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
+ dev_info(&adapter->pdev->dev,
+ "Configured eSwitch for port %d\n", eswitch->port);
+ }
+
+ return err;
+}
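For reference, a standalone sketch of the arg1 bit layout that qlcnic_config_switch_port() assembles above (the field names and the builder function below are descriptive only, not taken from the driver): bits 3:0 hold the eswitch port, BIT_4 discards tagged frames, BIT_5 enables VLAN tagging with the VLAN id in bits 31:16, BIT_6 is promiscuous mode, BIT_7 is MAC learning, and bits 15:8 carry the PCI function.

#include <assert.h>
#include <stdint.h>

#define E_BIT(n)	(1u << (n))

static uint32_t build_eswitch_arg1(uint8_t port, int discard_tagged,
				   int vlan_tagging, uint16_t vlan_id,
				   int promisc, int mac_learn,
				   uint8_t pci_func)
{
	uint32_t arg1 = port & 0xf;

	if (discard_tagged)
		arg1 |= E_BIT(4);
	if (promisc)
		arg1 |= E_BIT(6);
	if (mac_learn)
		arg1 |= E_BIT(7);
	arg1 |= (uint32_t)pci_func << 8;
	if (vlan_tagging)
		arg1 |= E_BIT(5) | ((uint32_t)vlan_id << 16);
	return arg1;
}

int main(void)
{
	uint32_t arg1 = build_eswitch_arg1(1, 0, 1, 100, 0, 1, 2);

	assert((arg1 & 0xf) == 1);	/* port */
	assert(arg1 & E_BIT(5));	/* vlan tagging enabled */
	assert((arg1 >> 16) == 100);	/* vlan id */
	return 0;
}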
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3bd514ec7e8f..d4e803e2a977 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,8 +69,6 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
- {"null skb",
- QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
{"null rxbuf",
QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
@@ -350,7 +348,7 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
for (i = 0; diag_registers[i] != -1; i++)
regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
@@ -683,13 +681,13 @@ static int qlcnic_loopback_test(struct net_device *netdev)
if (ret)
goto clear_it;
- ret = qlcnic_set_ilb_mode(adapter);
+ ret = adapter->nic_ops->set_ilb_mode(adapter);
if (ret)
goto done;
ret = qlcnic_do_ilb_test(adapter);
- qlcnic_clear_ilb_mode(adapter);
+ adapter->nic_ops->clear_ilb_mode(adapter);
done:
qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -715,7 +713,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
adapter->diag_cnt = 0;
ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
- QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+ adapter->fw_hal_version, adapter->portnum,
+ 0, 0, 0x00000011);
if (ret)
goto done;
@@ -834,7 +833,10 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
struct qlcnic_adapter *adapter = netdev_priv(dev);
int ret;
- ret = qlcnic_config_led(adapter, 1, 0xf);
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return -EIO;
+
+ ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to set LED blink state.\n");
@@ -843,7 +845,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
msleep_interruptible(val * 1000);
- ret = qlcnic_config_led(adapter, 0, 0xf);
+ ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to reset LED blink state.\n");
@@ -905,7 +907,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
/*
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index ad9d167723c4..7b81cab27002 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -208,6 +208,39 @@ enum {
QLCNIC_HW_PX_MAP_CRB_PGR0
};
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
/* This field defines CRB adr [31:20] of the agents */
#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
@@ -668,10 +701,11 @@ enum {
#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
-#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
@@ -684,15 +718,26 @@ enum {
#define QLCNIC_DEV_FAILED 0x6
#define QLCNIC_DEV_QUISCENT 0x7
+#define QLCNIC_DEV_NPAR_NOT_RDY 0
+#define QLCNIC_DEV_NPAR_RDY 1
+
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
+#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
+
+#define QLCNIC_TYPE_NIC 1
+#define QLCNIC_TYPE_FCOE 2
+#define QLCNIC_TYPE_ISCSI 3
+
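The partition-info and driver-op-mode registers referenced by these macros hold one 4-bit field per PCI function; QLC_DEV_GET_DRV() extracts the nibble for a given function and QLC_DEV_SET_DRV() positions one. A small stand-alone sketch of that decode (the register value and macro names below are made up for illustration; qlcnic_setup_idc_param() further down applies the same `& 0x3` type check):

#include <assert.h>
#include <stdint.h>

#define DEV_GET_DRV(val, fn)	(0xf & ((val) >> ((fn) * 4)))
#define DEV_SET_DRV(val, fn)	((uint32_t)(val) << ((fn) * 4))

#define TYPE_NIC	1

int main(void)
{
	/* Hypothetical register value: functions 0 and 2 are NIC
	 * partitions, function 1 is some other type (3). */
	uint32_t partition_info = DEV_SET_DRV(TYPE_NIC, 0) |
				  DEV_SET_DRV(3, 1) |
				  DEV_SET_DRV(TYPE_NIC, 2);

	assert(DEV_GET_DRV(partition_info, 0) == TYPE_NIC);
	assert(DEV_GET_DRV(partition_info, 1) == 3);
	assert((DEV_GET_DRV(partition_info, 2) & 0x3) == TYPE_NIC);
	return 0;
}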
#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
-#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
-#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR BIT_31
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
@@ -721,6 +766,35 @@ struct qlcnic_legacy_intr_set {
u32 pci_int_reg;
};
+#define QLCNIC_FW_API 0x1b216c
+#define QLCNIC_DRV_OP_MODE 0x1b2170
+#define QLCNIC_MSIX_BASE 0x132110
+#define QLCNIC_MAX_PCI_FUNC 8
+
+/* PCI function operational mode */
+enum {
+ QLCNIC_MGMT_FUNC = 0,
+ QLCNIC_PRIV_FUNC = 1,
+ QLCNIC_NON_PRIV_FUNC = 2
+};
+
+/* FW HAL api version */
+enum {
+ QLCNIC_FW_BASE = 1,
+ QLCNIC_FW_NPAR = 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x) ((uint16_t)((uint32_t)(x)))
+#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
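LSD()/MSD() split a 64-bit DMA handle into the two 32-bit mailbox arguments used by the context and NPAR commands; the double 16-bit shift in MSD() is presumably a defensive idiom against shift-width warnings when dma_addr_t is a 32-bit type. A stand-alone round-trip sketch:

#include <assert.h>
#include <stdint.h>

#define LSD(x)	((uint32_t)((uint64_t)(x)))
#define MSD(x)	((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;
	uint32_t lo = LSD(dma);
	uint32_t hi = MSD(dma);

	/* The two halves reassemble into the original address. */
	assert((((uint64_t)hi << 32) | lo) == dma);
	return 0;
}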
#define QLCNIC_LEGACY_INTR_CONFIG \
{ \
{ \
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 0c2e1f08f459..e08c8b0556a4 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -327,7 +327,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
i = 0;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
tx_ring = adapter->tx_ring;
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
netif_tx_stop_queue(tx_ring->txq);
- __netif_tx_unlock_bh(tx_ring->txq);
- adapter->stats.xmit_off++;
- return -EBUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
}
do {
@@ -407,10 +413,15 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
return -ENOMEM;
}
memcpy(cur->mac_addr, addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->mac_list);
- return qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, QLCNIC_MAC_ADD);
+ if (qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD)) {
+ kfree(cur);
+ return -EIO;
+ }
+
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return 0;
}
void qlcnic_set_multi(struct net_device *netdev)
@@ -420,7 +431,7 @@ void qlcnic_set_multi(struct net_device *netdev)
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
qlcnic_nic_add_mac(adapter, adapter->mac_addr);
@@ -538,7 +549,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -704,21 +715,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
return rc;
}
-int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
{
- u32 crbaddr, mac_hi, mac_lo;
+ u32 crbaddr;
int pci_func = adapter->ahw.pci_func;
crbaddr = CRB_MAC_BLOCK_START +
(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
- mac_lo = QLCRD32(adapter, crbaddr);
- mac_hi = QLCRD32(adapter, crbaddr+4);
-
- if (pci_func & 1)
- *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
- else
- *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+ qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
return 0;
}
@@ -766,7 +771,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
* Out: 'off' is 2M pci map addr
* side effect: lock crb window
*/
-static void
+static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
u32 window;
@@ -775,6 +780,10 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
off -= QLCNIC_PCI_CRBSPACE;
window = CRB_HI(off);
+ if (window == 0) {
+ dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+ return -EIO;
+ }
writel(window, addr);
if (readl(addr) != window) {
@@ -782,7 +791,9 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
dev_warn(&adapter->pdev->dev,
"failed to set CRB window to %d off 0x%lx\n",
window, off);
+ return -EIO;
}
+ return 0;
}
int
@@ -803,11 +814,12 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
/* indirect access */
write_lock_irqsave(&adapter->ahw.crb_lock, flags);
crb_win_lock(adapter);
- qlcnic_pci_set_crbwindow_2M(adapter, off);
- writel(data, addr);
+ rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+ if (!rv)
+ writel(data, addr);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
- return 0;
+ return rv;
}
dev_err(&adapter->pdev->dev,
@@ -821,7 +833,7 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
- u32 data;
+ u32 data = -1;
void __iomem *addr = NULL;
rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
@@ -833,8 +845,8 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
/* indirect access */
write_lock_irqsave(&adapter->ahw.crb_lock, flags);
crb_win_lock(adapter);
- qlcnic_pci_set_crbwindow_2M(adapter, off);
- data = readl(addr);
+ if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+ data = readl(addr);
crb_win_unlock(adapter);
write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
return data;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 71a4e664ad76..6678127ed4f2 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -112,15 +112,42 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[ring];
for (i = 0; i < rds_ring->num_desc; ++i) {
rx_buf = &(rds_ring->rx_buf_arr[i]);
- if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ if (rx_buf->skb == NULL)
continue;
+
pci_unmap_single(adapter->pdev,
rx_buf->dma,
rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
- if (rx_buf->skb != NULL)
- dev_kfree_skb_any(rx_buf->skb);
+
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ spin_lock(&rds_ring->lock);
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf++;
}
+
+ spin_unlock(&rds_ring->lock);
}
}
@@ -181,7 +208,9 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
kfree(adapter->tx_ring);
+ adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -264,7 +293,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
list_add_tail(&rx_buf->list,
&rds_ring->free_list);
rx_buf->ref_handle = i;
- rx_buf->state = QLCNIC_BUFFER_FREE;
rx_buf++;
}
spin_lock_init(&rds_ring->lock);
@@ -413,7 +441,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
/* resetall */
qlcnic_rom_lock(adapter);
- QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
qlcnic_rom_unlock(adapter);
if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -520,17 +548,16 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
- val = (val >> (adapter->portnum * 4)) & 0xf;
-
- if ((val & 0x3) != 1) {
- dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
- val);
- return -EIO;
+ if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_DEV_GET_DRV(val, adapter->portnum);
+ if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+ dev_err(&adapter->pdev->dev,
+ "Not an Ethernet NIC func=%u\n", val);
+ return -EIO;
+ }
+ adapter->physical_port = (val >> 2);
}
-
- adapter->physical_port = (val >> 2);
-
if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
timeo = 30;
@@ -544,16 +571,34 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0;
}
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+ u32 ver = -1, min_ver;
+
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+
+ ver = QLCNIC_DECODE_VERSION(ver);
+ min_ver = QLCNIC_MIN_FW_VERSION;
+
+ if (ver < min_ver) {
+ dev_err(&adapter->pdev->dev,
+ "firmware version %d.%d.%d unsupported."
+ "Min supported version %d.%d.%d\n",
+ _major(ver), _minor(ver), _build(ver),
+ _major(min_ver), _minor(min_ver), _build(min_ver));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
- u32 capability, flashed_ver;
+ u32 capability;
capability = 0;
- qlcnic_rom_fast_read(adapter,
- QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
- flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
-
capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;
@@ -1007,7 +1052,7 @@ static int
qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, bios, min_size;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
@@ -1029,12 +1074,9 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
val = qlcnic_get_fw_version(adapter);
-
- min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
-
ver = QLCNIC_DECODE_VERSION(val);
- if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ if (ver < QLCNIC_MIN_FW_VERSION) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
@@ -1122,7 +1164,7 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
adapter->fw = NULL;
}
-int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
{
u32 val;
int retries = 60;
@@ -1147,7 +1189,8 @@ int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
out_err:
- dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
return -EIO;
}
@@ -1180,6 +1223,10 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
{
int err;
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
err = qlcnic_receive_peg_ready(adapter);
if (err)
return err;
@@ -1265,14 +1312,12 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
dma_addr_t dma;
struct pci_dev *pdev = adapter->pdev;
- buffer->skb = dev_alloc_skb(rds_ring->skb_size);
- if (!buffer->skb) {
+ skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!skb) {
adapter->stats.skb_alloc_failure++;
return -ENOMEM;
}
- skb = buffer->skb;
-
skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
@@ -1281,13 +1326,11 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
- buffer->skb = NULL;
return -ENOMEM;
}
buffer->skb = skb;
buffer->dma = dma;
- buffer->state = QLCNIC_BUFFER_BUSY;
return 0;
}
@@ -1300,14 +1343,15 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb) {
- adapter->stats.null_skb++;
- goto no_skb;
- }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1319,8 +1363,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb->dev = adapter->netdev;
buffer->skb = NULL;
-no_skb:
- buffer->state = QLCNIC_BUFFER_FREE;
+
return skb;
}
@@ -1495,7 +1538,7 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
WARN_ON(desc_cnt > 1);
- if (rxbuf)
+ if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
@@ -1701,3 +1744,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
sds_ring->consumer = consumer;
writel(consumer, sds_ring->crb_sts_consumer);
}
+
+void
+qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
+ u8 alt_mac, u8 *mac)
+{
+ u32 mac_low, mac_high;
+ int i;
+
+ mac_low = QLCRD32(adapter, off1);
+ mac_high = QLCRD32(adapter, off2);
+
+ if (alt_mac) {
+ mac_low |= (mac_low >> 16) | (mac_high << 16);
+ mac_high >>= 16;
+ }
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8)(mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+}
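A user-space sketch of the byte order qlcnic_fetch_mac() produces, copying its two loops verbatim and ignoring the alt_mac adjustment: the high CRB word supplies the first two MAC bytes, the low word the remaining four, most significant byte first.

#include <assert.h>
#include <stdint.h>

static void fetch_mac(uint32_t mac_low, uint32_t mac_high, uint8_t *mac)
{
	int i;

	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));
}

int main(void)
{
	uint8_t mac[6];

	fetch_mac(0x33445566, 0x00001122, mac);
	assert(mac[0] == 0x11 && mac[1] == 0x22);
	assert(mac[2] == 0x33 && mac[5] == 0x66);
	return 0;
}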
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 23ea9caa5261..3b71dfcd6d44 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -35,14 +35,14 @@
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
-MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
char qlcnic_driver_name[] = "qlcnic";
-static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
- QLCNIC_LINUX_VERSIONID;
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+ "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
@@ -65,13 +65,16 @@ static int load_fw_file;
module_param(load_fw_file, int, 0644);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
+static int qlcnic_config_npars;
+module_param(qlcnic_config_npars, int, 0644);
+MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
+
static int __devinit qlcnic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
-static void qlcnic_tx_timeout_task(struct work_struct *work);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
@@ -79,6 +82,7 @@ static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
@@ -99,7 +103,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
-
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
+static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -120,12 +131,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
- if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
- netif_stop_queue(adapter->netdev);
- smp_mb();
- adapter->stats.xmit_off++;
- }
}
static const u32 msi_tgt_status[8] = {
@@ -184,8 +189,13 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+
+ if (ring == adapter->max_sds_rings - 1)
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
+ QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
}
return 0;
@@ -307,19 +317,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- int i;
- unsigned char *p;
- u64 mac_addr;
+ u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+ if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
return -EIO;
- p = (unsigned char *)&mac_addr;
- for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = *(p + 5 - i);
-
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -340,7 +345,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- if (netif_running(netdev)) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
qlcnic_napi_disable(adapter);
}
@@ -349,7 +354,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
qlcnic_set_multi(adapter->netdev);
- if (netif_running(netdev)) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_attach(netdev);
qlcnic_napi_enable(adapter);
}
@@ -371,6 +376,33 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#endif
};
+static struct qlcnic_nic_template qlcnic_ops = {
+ .get_mac_addr = qlcnic_get_mac_addr,
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .set_ilb_mode = qlcnic_set_ilb_mode,
+ .clear_ilb_mode = qlcnic_clear_ilb_mode,
+ .start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_pf_ops = {
+ .get_mac_addr = qlcnic_get_mac_address,
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .set_ilb_mode = qlcnic_set_ilb_mode,
+ .clear_ilb_mode = qlcnic_clear_ilb_mode,
+ .start_firmware = qlcnic_start_firmware
+};
+
+static struct qlcnic_nic_template qlcnic_vf_ops = {
+ .get_mac_addr = qlcnic_get_mac_address,
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .set_ilb_mode = qlcnicvf_set_ilb_mode,
+ .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
+ .start_firmware = qlcnicvf_start_firmware
+};
+
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
@@ -453,6 +485,121 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
}
static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ u32 ref_count;
+ int i, ret = 1;
+ u32 data = QLCNIC_MGMT_FUNC;
+ void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+
+ /* If other drivers are not in use set their privilege level */
+ ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ ret = qlcnic_api_lock(adapter);
+ if (ret)
+ goto err_lock;
+ if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
+ goto err_npar;
+
+ if (qlcnic_config_npars) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ id = adapter->npars[i].id;
+ if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
+ id == adapter->ahw.pci_func)
+ continue;
+ data |= (qlcnic_config_npars &
+ QLC_DEV_SET_DRV(0xf, id));
+ }
+ } else {
+ data = readl(priv_op);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
+ (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
+ adapter->ahw.pci_func));
+ }
+ writel(data, priv_op);
+err_npar:
+ qlcnic_api_unlock(adapter);
+err_lock:
+ return ret;
+}
+
+static u32
+qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ void __iomem *priv_op;
+ u32 func;
+ u32 msix_base;
+ u32 op_mode, priv_level;
+
+ /* Determine FW API version */
+ adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
+ if (adapter->fw_hal_version == ~0) {
+ adapter->nic_ops = &qlcnic_ops;
+ adapter->fw_hal_version = QLCNIC_FW_BASE;
+ adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn);
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+ dev_info(&adapter->pdev->dev,
+ "FW does not support nic partion\n");
+ return adapter->fw_hal_version;
+ }
+
+ /* Find PCI function number */
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw.pci_func = func;
+
+ qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ adapter->nic_ops = &qlcnic_ops;
+ return adapter->fw_hal_version;
+ }
+
+ /* Determine function privilege level */
+ priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ op_mode = readl(priv_op);
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+
+ switch (priv_level) {
+ case QLCNIC_MGMT_FUNC:
+ adapter->op_mode = QLCNIC_MGMT_FUNC;
+ adapter->nic_ops = &qlcnic_pf_ops;
+ qlcnic_get_pci_info(adapter);
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->fw_hal_version);
+ break;
+ case QLCNIC_PRIV_FUNC:
+ adapter->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->fw_hal_version);
+ adapter->nic_ops = &qlcnic_pf_ops;
+ break;
+ case QLCNIC_NON_PRIV_FUNC:
+ adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ adapter->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ break;
+ default:
+ dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
+ priv_level);
+ return 0;
+ }
+ return adapter->fw_hal_version;
+}
+
+static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
void __iomem *mem_ptr0 = NULL;
@@ -460,7 +607,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
unsigned long mem_len, pci_len0 = 0;
struct pci_dev *pdev = adapter->pdev;
- int pci_func = adapter->ahw.pci_func;
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
@@ -483,8 +629,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
adapter->ahw.pci_base0 = mem_ptr0;
adapter->ahw.pci_len0 = pci_len0;
+ if (!qlcnic_get_driver_mode(adapter)) {
+ iounmap(adapter->ahw.pci_base0);
+ return -EIO;
+ }
+
adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
return 0;
}
@@ -509,7 +660,7 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
}
if (!found)
- name = "Unknown";
+ sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}
static void
@@ -553,8 +704,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
- adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
-
adapter->flags &= ~QLCNIC_LRO_ENABLED;
if (adapter->ahw.port_type == QLCNIC_XGBE) {
@@ -565,6 +714,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
+ qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
+
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
@@ -591,8 +742,12 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
if (load_fw_file)
qlcnic_request_firmware(adapter);
- else
+ else {
+ if (qlcnic_check_flash_fw_ver(adapter))
+ goto err_out;
+
adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ }
err = qlcnic_need_fw_reset(adapter);
if (err < 0)
@@ -602,6 +757,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
if (first_boot != 0x55555555) {
QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
qlcnic_pinit_from_rom(adapter);
msleep(1);
}
@@ -624,7 +780,7 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
wait_init:
/* Handshake with the card before we register the devices. */
- err = qlcnic_phantom_init(adapter);
+ err = qlcnic_init_firmware(adapter);
if (err)
goto err_out;
@@ -633,6 +789,10 @@ wait_init:
qlcnic_check_options(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED &&
+ adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
+ qlcnic_dev_set_npar_ready(adapter);
+
adapter->need_fw_reset = 0;
qlcnic_release_firmware(adapter);
@@ -716,9 +876,23 @@ qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return -EIO;
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return 0;
+
+ if (qlcnic_fw_create_ctx(adapter))
+ return -EIO;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
qlcnic_set_multi(netdev);
qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
@@ -736,6 +910,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_linkevent_request(adapter, 1);
+ adapter->reset_context = 0;
set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -775,6 +950,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
spin_unlock(&adapter->tx_clean_lock);
}
@@ -796,16 +974,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- int err, ring;
- struct qlcnic_host_rds_ring *rds_ring;
+ int err;
if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
return 0;
- err = qlcnic_init_firmware(adapter);
- if (err)
- return err;
-
err = qlcnic_napi_add(adapter, netdev);
if (err)
return err;
@@ -813,7 +986,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
err = qlcnic_alloc_sw_resources(adapter);
if (err) {
dev_err(&pdev->dev, "Error in setting sw resources\n");
- return err;
+ goto err_out_napi_del;
}
err = qlcnic_alloc_hw_resources(adapter);
@@ -822,16 +995,10 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
goto err_out_free_sw;
}
-
- for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
- }
-
err = qlcnic_request_irq(adapter);
if (err) {
dev_err(&pdev->dev, "failed to setup interrupt\n");
- goto err_out_free_rxbuf;
+ goto err_out_free_hw;
}
qlcnic_init_coalesce_defaults(adapter);
@@ -841,11 +1008,12 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
-err_out_free_rxbuf:
- qlcnic_release_rx_buffers(adapter);
+err_out_free_hw:
qlcnic_free_hw_resources(adapter);
err_out_free_sw:
qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+ qlcnic_napi_del(adapter);
return err;
}
@@ -880,6 +1048,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
}
}
+ qlcnic_fw_destroy_ctx(adapter);
+
qlcnic_detach(adapter);
adapter->diag_test = 0;
@@ -898,6 +1068,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
int ring;
int ret;
@@ -917,6 +1088,17 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
return ret;
}
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -928,6 +1110,27 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
return 0;
}
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
@@ -971,18 +1174,17 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
adapter->max_mc_count = 38;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 2*HZ;
+ netdev->watchdog_timeo = 5*HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
- netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
- netdev->features |= (NETIF_F_GRO);
- netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6);
- netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6);
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -997,8 +1199,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
- INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
-
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -1036,7 +1236,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
int err;
- int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
uint8_t pci_using_dac;
@@ -1072,7 +1271,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->dev_rst_time = jiffies;
- adapter->ahw.pci_func = pci_func_id;
revision_id = pdev->revision;
adapter->ahw.revision_id = revision_id;
@@ -1088,7 +1286,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_netdev;
/* This will be reset for mezz cards */
- adapter->portnum = pci_func_id;
+ adapter->portnum = adapter->ahw.pci_func;
err = qlcnic_get_board_info(adapter);
if (err) {
@@ -1102,7 +1300,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_setup_idc_param(adapter))
goto err_out_iounmap;
- err = qlcnic_start_firmware(adapter);
+ err = adapter->nic_ops->start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
goto err_out_decr_ref;
@@ -1171,10 +1369,13 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- cancel_work_sync(&adapter->tx_timeout_task);
-
qlcnic_detach(adapter);
+ if (adapter->npars != NULL)
+ kfree(adapter->npars);
+ if (adapter->eswitch != NULL)
+ kfree(adapter->eswitch);
+
qlcnic_clr_all_drv_state(adapter);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1206,10 +1407,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- cancel_work_sync(&adapter->tx_timeout_task);
-
- qlcnic_detach(adapter);
-
qlcnic_clr_all_drv_state(adapter);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1263,35 +1460,23 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- err = qlcnic_start_firmware(adapter);
+ err = adapter->nic_ops->start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
}
if (netif_running(netdev)) {
- err = qlcnic_attach(adapter);
- if (err)
- goto err_out;
-
err = qlcnic_up(adapter, netdev);
if (err)
- goto err_out_detach;
-
+ goto done;
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
-
+done:
netif_device_attach(netdev);
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
return 0;
-
-err_out_detach:
- qlcnic_detach(adapter);
-err_out:
- qlcnic_clr_all_drv_state(adapter);
- netif_device_attach(netdev);
- return err;
}
#endif
@@ -1340,11 +1525,11 @@ qlcnic_tso_check(struct net_device *netdev,
u8 opcode = TX_ETHER_PKT;
__be16 protocol = skb->protocol;
u16 flags = 0, vid = 0;
- u32 producer;
int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u32 producer = tx_ring->producer;
if (protocol == cpu_to_be16(ETH_P_8021Q)) {
@@ -1360,6 +1545,11 @@ qlcnic_tso_check(struct net_device *netdev,
vlan_oob = 1;
}
+ if (*(skb->data) & BIT_0) {
+ flags |= BIT_0;
+ memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+ }
+
if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
@@ -1409,7 +1599,6 @@ qlcnic_tso_check(struct net_device *netdev,
/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring
*/
- producer = tx_ring->producer;
copied = 0;
offset = 2;
@@ -1537,10 +1726,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
- adapter->stats.xmit_off++;
- return NETDEV_TX_BUSY;
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else {
+ adapter->stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
}
producer = tx_ring->producer;
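The stop/re-check sequence above replaces the old unconditional NETDEV_TX_BUSY return; the barrier closes the classic lost-wakeup race where the completion path frees descriptors between the availability test and netif_stop_queue(). A condensed kernel-style sketch of the pattern, using the helpers as this driver names them (shape only, not a drop-in fragment):

/* Sketch of the race-free stop/wake dance in the xmit path. */
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
	netif_stop_queue(netdev);	/* publish "stopped" */
	smp_mb();			/* pairs with the barrier in
					 * qlcnic_process_cmd_ring() */
	if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
		netif_start_queue(netdev);	/* completions freed room
						 * after all; keep going */
	else
		return NETDEV_TX_BUSY;		/* genuinely full */
}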
@@ -1675,35 +1869,11 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
return;
dev_err(&netdev->dev, "transmit timeout, resetting.\n");
- schedule_work(&adapter->tx_timeout_task);
-}
-
-static void qlcnic_tx_timeout_task(struct work_struct *work)
-{
- struct qlcnic_adapter *adapter =
- container_of(work, struct qlcnic_adapter, tx_timeout_task);
-
- if (!netif_running(adapter->netdev))
- return;
-
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return;
if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
- goto request_reset;
-
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- if (!qlcnic_reset_context(adapter)) {
- adapter->netdev->trans_start = jiffies;
- return;
-
- /* context reset failed, fall through for fw reset */
- }
-
-request_reset:
- adapter->need_fw_reset = 1;
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- QLCDB(adapter, DRV, "Resetting adapter\n");
+ adapter->need_fw_reset = 1;
+ else
+ adapter->reset_context = 1;
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1846,14 +2016,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
smp_mb();
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
- __netif_tx_lock(tx_ring->txq, smp_processor_id());
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
- adapter->tx_timeo_cnt = 0;
adapter->stats.xmit_on++;
}
- __netif_tx_unlock(tx_ring->txq);
}
+ adapter->tx_timeo_cnt = 0;
}
/*
* If everything is freed up to consumer then check if the ring is full
@@ -1898,6 +2066,25 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ int work_done;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2109,7 +2296,7 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- u32 dev_state = 0xf;
+ u32 dev_state = 0xf, npar_state;
if (qlcnic_api_lock(adapter))
goto err_ret;
@@ -2122,6 +2309,19 @@ qlcnic_fwinit_work(struct work_struct *work)
return;
}
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (npar_state == QLCNIC_DEV_NPAR_RDY) {
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
+ } else {
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY);
+ qlcnic_api_unlock(adapter);
+ return;
+ }
+ }
+
if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
adapter->reset_ack_timeo);
@@ -2154,7 +2354,7 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
- if (!qlcnic_start_firmware(adapter)) {
+ if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
@@ -2163,6 +2363,7 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
+wait_npar:
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
@@ -2177,7 +2378,7 @@ skip_ack_check:
break;
default:
- if (!qlcnic_start_firmware(adapter)) {
+ if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
@@ -2202,10 +2403,6 @@ qlcnic_detach_work(struct work_struct *work)
qlcnic_down(adapter, netdev);
- rtnl_lock();
- qlcnic_detach(adapter);
- rtnl_unlock();
-
status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR)
@@ -2251,6 +2448,26 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
+/* Transit to NPAR READY state from NPAR NOT READY state */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+
+ if (state != QLCNIC_DEV_NPAR_RDY) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_RDY);
+ QLCDB(adapter, DRV, "NPAR READY state set\n");
+ }
+
+ qlcnic_api_unlock(adapter);
+}
+
static void
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
work_func_t func, int delay)
@@ -2274,18 +2491,10 @@ qlcnic_attach_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
struct net_device *netdev = adapter->netdev;
- int err;
if (netif_running(netdev)) {
- err = qlcnic_attach(adapter);
- if (err)
- goto done;
-
- err = qlcnic_up(adapter, netdev);
- if (err) {
- qlcnic_detach(adapter);
+ if (qlcnic_up(adapter, netdev))
goto done;
- }
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
@@ -2322,6 +2531,12 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
+
+ if (adapter->reset_context) {
+ qlcnic_reset_hw_context(adapter);
+ adapter->netdev->trans_start = jiffies;
+ }
+
return 0;
}
@@ -2365,6 +2580,46 @@ reschedule:
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err)
+ return err;
+
+ qlcnic_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ return err;
+}
+
+static int
+qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static void
+qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+
static ssize_t
qlcnic_store_bridged_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
@@ -2376,13 +2631,13 @@ qlcnic_store_bridged_mode(struct device *dev,
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
goto err_out;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto err_out;
if (strict_strtoul(buf, 2, &new))
goto err_out;
- if (!qlcnic_config_bridged_mode(adapter, !!new))
+ if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
ret = len;
err_out:
@@ -2684,7 +2939,7 @@ recheck:
if (!adapter)
goto done;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
qlcnic_config_indev_addr(dev, event);
@@ -2720,7 +2975,7 @@ recheck:
if (!adapter)
goto done;
- if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto done;
switch (event) {
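
The qlcnic hunks above add a dedicated RX-only NAPI handler (qlcnic_rx_poll) that completes NAPI and re-arms the SDS-ring interrupt only when the budget was not exhausted. For reference, here is a minimal, driver-agnostic sketch of that standard poll contract; my_ring, my_process_rx() and my_enable_irq() are hypothetical stand-ins, not qlcnic symbols:

static int my_rx_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	int work_done;

	/* Process at most 'budget' packets in softirq context */
	work_done = my_process_rx(ring, budget);

	if (work_done < budget) {
		/* Ring drained: leave polling mode and let the device
		 * interrupt us again for the next packet */
		napi_complete(napi);
		my_enable_irq(ring);
	}

	/* If work_done == budget, the channel stays on the poll list */
	return work_done;
}
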
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 20624ba44a37..bfb8b327f2fd 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1062,7 +1062,7 @@ struct tx_buf_desc {
#define TX_DESC_LEN_MASK 0x000fffff
#define TX_DESC_C 0x40000000
#define TX_DESC_E 0x80000000
-} __attribute((packed));
+} __packed;
/*
* IOCB Definitions...
@@ -1095,7 +1095,7 @@ struct ob_mac_iocb_req {
__le16 vlan_tci;
__le16 reserved4;
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
struct ob_mac_iocb_rsp {
u8 opcode; /* */
@@ -1112,7 +1112,7 @@ struct ob_mac_iocb_rsp {
u32 tid;
u32 txq_idx;
__le32 reserved[13];
-} __attribute((packed));
+} __packed;
struct ob_mac_tso_iocb_req {
u8 opcode;
@@ -1140,7 +1140,7 @@ struct ob_mac_tso_iocb_req {
__le16 vlan_tci;
__le16 mss;
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __attribute((packed));
+} __packed;
struct ob_mac_tso_iocb_rsp {
u8 opcode;
@@ -1157,7 +1157,7 @@ struct ob_mac_tso_iocb_rsp {
u32 tid;
u32 txq_idx;
__le32 reserved2[13];
-} __attribute((packed));
+} __packed;
struct ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
@@ -1216,7 +1216,7 @@ struct ib_mac_iocb_rsp {
#define IB_MAC_IOCB_RSP_HL 0x80
__le32 hdr_len; /* */
__le64 hdr_addr; /* */
-} __attribute((packed));
+} __packed;
struct ib_ae_iocb_rsp {
u8 opcode;
@@ -1237,7 +1237,7 @@ struct ib_ae_iocb_rsp {
#define PCI_ERR_ANON_BUF_RD 0x40
u8 q_id;
__le32 reserved[15];
-} __attribute((packed));
+} __packed;
/*
* These three structures are for generic
@@ -1249,7 +1249,7 @@ struct ql_net_rsp_iocb {
__le16 length;
__le32 tid;
__le32 reserved[14];
-} __attribute((packed));
+} __packed;
struct net_req_iocb {
u8 opcode;
@@ -1257,7 +1257,7 @@ struct net_req_iocb {
__le16 flags1;
__le32 tid;
__le32 reserved1[30];
-} __attribute((packed));
+} __packed;
/*
* tx ring initialization control block for chip.
@@ -1283,7 +1283,7 @@ struct wqicb {
__le16 rid;
__le64 addr;
__le64 cnsmr_idx_addr;
-} __attribute((packed));
+} __packed;
/*
* rx ring initialization control block for chip.
@@ -1317,7 +1317,7 @@ struct cqicb {
__le64 sbq_addr;
__le16 sbq_buf_size;
__le16 sbq_len; /* entry count */
-} __attribute((packed));
+} __packed;
struct ricb {
u8 base_cq;
@@ -1335,7 +1335,7 @@ struct ricb {
u8 hash_cq_id[1024];
__le32 ipv6_hash_key[10];
__le32 ipv4_hash_key[4];
-} __attribute((packed));
+} __packed;
/* SOFTWARE/DRIVER DATA STRUCTURES. */
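
The qlge.h changes above only swap the open-coded attribute for the kernel's __packed shorthand (defined in include/linux/compiler-gcc.h as __attribute__((packed))), so the on-wire structure layouts are unchanged. A small self-contained illustration with a hypothetical struct, compilable in userspace:

#include <assert.h>
#include <stdint.h>

#define __packed __attribute__((packed))	/* same expansion the kernel uses */

struct demo_iocb {
	uint8_t  opcode;
	uint16_t flags;		/* without packing this would sit at offset 2 */
	uint32_t tid;
} __packed;

int main(void)
{
	/* No padding is inserted: 1 + 2 + 4 bytes, laid out back to back */
	assert(sizeof(struct demo_iocb) == 7);
	return 0;
}
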
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 9a251acf5ab8..7d482a2316ac 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -44,12 +44,13 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
+#include <linux/phy.h>
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.25"
-#define DRV_RELDATE "20Aug2009"
+#define DRV_VERSION "0.26"
+#define DRV_RELDATE "30May2010"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -179,7 +180,6 @@ struct r6040_descriptor {
struct r6040_private {
spinlock_t lock; /* driver lock */
- struct timer_list timer;
struct pci_dev *pdev;
struct r6040_descriptor *rx_insert_ptr;
struct r6040_descriptor *rx_remove_ptr;
@@ -189,13 +189,15 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
- u16 tx_free_desc, phy_addr, phy_mode;
+ u16 tx_free_desc, phy_addr;
u16 mcr0, mcr1;
- u16 switch_sig;
struct net_device *dev;
- struct mii_if_info mii_if;
+ struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
+ struct phy_device *phydev;
+ int old_link;
+ int old_duplex;
};
static char version[] __devinitdata = KERN_INFO DRV_NAME
@@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
}
}
-static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg)
+static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
{
+ struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- return (r6040_phy_read(ioaddr, lp->phy_addr, reg));
+ return r6040_phy_read(ioaddr, phy_addr, reg);
}
-static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val)
+static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
+ int reg, u16 value)
{
+ struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- r6040_phy_write(ioaddr, lp->phy_addr, reg, val);
+ r6040_phy_write(ioaddr, phy_addr, reg, value);
+
+ return 0;
+}
+
+static int r6040_mdiobus_reset(struct mii_bus *bus)
+{
+ return 0;
}
static void r6040_free_txbufs(struct net_device *dev)
@@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev)
void __iomem *ioaddr = priv->base;
netdev_warn(dev, "transmit timed out, int enable %4.4x "
- "status %4.4x, PHY status %4.4x\n",
+ "status %4.4x\n",
ioread16(ioaddr + MIER),
- ioread16(ioaddr + MISR),
- r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
+ ioread16(ioaddr + MISR));
dev->stats.tx_errors++;
@@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
- /* deleted timer */
- del_timer_sync(&lp->timer);
-
spin_lock_irq(&lp->lock);
napi_disable(&lp->napi);
netif_stop_queue(dev);
@@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev)
return 0;
}
-/* Status of PHY CHIP */
-static int r6040_phy_mode_chk(struct net_device *dev)
-{
- struct r6040_private *lp = netdev_priv(dev);
- void __iomem *ioaddr = lp->base;
- int phy_dat;
-
- /* PHY Link Status Check */
- phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
- if (!(phy_dat & 0x4))
- phy_dat = 0x8000; /* Link Failed, full duplex */
-
- /* PHY Chip Auto-Negotiation Status */
- phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
- if (phy_dat & 0x0020) {
- /* Auto Negotiation Mode */
- phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
- phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
- if (phy_dat & 0x140)
- /* Force full duplex */
- phy_dat = 0x8000;
- else
- phy_dat = 0;
- } else {
- /* Force Mode */
- phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
- if (phy_dat & 0x100)
- phy_dat = 0x8000;
- else
- phy_dat = 0x0000;
- }
-
- return phy_dat;
-};
-
-static void r6040_set_carrier(struct mii_if_info *mii)
-{
- if (r6040_phy_mode_chk(mii->dev)) {
- /* autoneg is off: Link is always assumed to be up */
- if (!netif_carrier_ok(mii->dev))
- netif_carrier_on(mii->dev);
- } else
- r6040_phy_mode_chk(mii->dev);
-}
-
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r6040_private *lp = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(rq);
- int rc;
- if (!netif_running(dev))
+ if (!lp->phydev)
return -EINVAL;
- spin_lock_irq(&lp->lock);
- rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
- spin_unlock_irq(&lp->lock);
- r6040_set_carrier(&lp->mii_if);
- return rc;
+
+ return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
}
static int r6040_rx(struct net_device *dev, int limit)
@@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev)
if (ret)
return ret;
- /* Read the PHY ID */
- lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
-
- if (lp->switch_sig == ICPLUS_PHY_ID) {
- r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
- lp->phy_mode = 0x8000;
- } else {
- /* PHY Mode Check */
- r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
- r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
-
- if (PHY_MODE == 0x3100)
- lp->phy_mode = r6040_phy_mode_chk(dev);
- else
- lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
- }
-
- /* Set duplex mode */
- lp->mcr0 |= lp->phy_mode;
-
/* improve performance (by RDC guys) */
r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
@@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev)
return 0;
}
-/*
- A periodic timer routine
- Polling PHY Chip Link Status
-*/
-static void r6040_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct r6040_private *lp = netdev_priv(dev);
- void __iomem *ioaddr = lp->base;
- u16 phy_mode;
-
- /* Polling PHY Chip Status */
- if (PHY_MODE == 0x3100)
- phy_mode = r6040_phy_mode_chk(dev);
- else
- phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
-
- if (phy_mode != lp->phy_mode) {
- lp->phy_mode = phy_mode;
- lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
- iowrite16(lp->mcr0, ioaddr);
- }
-
- /* Timer active again */
- mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
-
- /* Check media */
- mii_check_media(&lp->mii_if, 1, 1);
-}
/* Read/set MAC address routines */
static void r6040_mac_address(struct net_device *dev)
@@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev)
napi_enable(&lp->napi);
netif_start_queue(dev);
- /* set and active a timer process */
- setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
- if (lp->switch_sig != ICPLUS_PHY_ID)
- mod_timer(&lp->timer, jiffies + HZ);
return 0;
}
@@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev,
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r6040_private *rp = netdev_priv(dev);
- int rc;
-
- spin_lock_irq(&rp->lock);
- rc = mii_ethtool_gset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
- return rc;
+ return phy_ethtool_gset(rp->phydev, cmd);
}
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r6040_private *rp = netdev_priv(dev);
- int rc;
-
- spin_lock_irq(&rp->lock);
- rc = mii_ethtool_sset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
- r6040_set_carrier(&rp->mii_if);
-
- return rc;
-}
-
-static u32 netdev_get_link(struct net_device *dev)
-{
- struct r6040_private *rp = netdev_priv(dev);
- return mii_link_ok(&rp->mii_if);
+ return phy_ethtool_sset(rp->phydev, cmd);
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
- .get_link = netdev_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops r6040_netdev_ops = {
@@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = {
#endif
};
+static void r6040_adjust_link(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct phy_device *phydev = lp->phydev;
+ int status_changed = 0;
+ void __iomem *ioaddr = lp->base;
+
+ BUG_ON(!phydev);
+
+ if (lp->old_link != phydev->link) {
+ status_changed = 1;
+ lp->old_link = phydev->link;
+ }
+
+ /* reflect duplex change */
+ if (phydev->link && (lp->old_duplex != phydev->duplex)) {
+ lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+ iowrite16(lp->mcr0, ioaddr);
+
+ status_changed = 1;
+ lp->old_duplex = phydev->duplex;
+ }
+
+ if (status_changed) {
+ pr_info("%s: link %s", dev->name, phydev->link ?
+ "UP" : "DOWN");
+ if (phydev->link)
+ pr_cont(" - %d/%s", phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "full" : "half");
+ pr_cont("\n");
+ }
+}
+
+static int r6040_mii_probe(struct net_device *dev)
+{
+ struct r6040_private *lp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+
+ phydev = phy_find_first(lp->mii_bus);
+ if (!phydev) {
+ dev_err(&lp->pdev->dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ dev_err(&lp->pdev->dev, "could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+ lp->phydev = phydev;
+ lp->old_link = 0;
+ lp->old_duplex = -1;
+
+ dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+}
+
static int __devinit r6040_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
static int card_idx = -1;
int bar = 0;
u16 *adrp;
+ int i;
printk("%s\n", version);
@@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Init RDC private data */
lp->mcr0 = 0x1002;
lp->phy_addr = phy_table[card_idx];
- lp->switch_sig = 0;
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
@@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, r6040_poll, 64);
- lp->mii_if.dev = dev;
- lp->mii_if.mdio_read = r6040_mdio_read;
- lp->mii_if.mdio_write = r6040_mdio_write;
- lp->mii_if.phy_id = lp->phy_addr;
- lp->mii_if.phy_id_mask = 0x1f;
- lp->mii_if.reg_num_mask = 0x1f;
-
- /* Check the vendor ID on the PHY, if 0xffff assume none attached */
- if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
- dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
- err = -ENODEV;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
goto err_out_unmap;
}
+ lp->mii_bus->priv = dev;
+ lp->mii_bus->read = r6040_mdiobus_read;
+ lp->mii_bus->write = r6040_mdiobus_write;
+ lp->mii_bus->reset = r6040_mdiobus_reset;
+ lp->mii_bus->name = "r6040_eth_mii";
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+ lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+ goto err_out_mdio;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(lp->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register MII bus\n");
+ goto err_out_mdio_irq;
+ }
+
+ err = r6040_mii_probe(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe MII bus\n");
+ goto err_out_mdio_unregister;
+ }
+
/* Register net device. After this dev->name assign */
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Failed to register net device\n");
- goto err_out_unmap;
+ goto err_out_mdio_unregister;
}
return 0;
+err_out_mdio_unregister:
+ mdiobus_unregister(lp->mii_bus);
+err_out_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_mdio:
+ mdiobus_free(lp->mii_bus);
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
@@ -1206,8 +1192,12 @@ err_out:
static void __devexit r6040_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct r6040_private *lp = netdev_priv(dev);
unregister_netdev(dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
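
The r6040 conversion above drops the private link-poll timer and the mii_if_info glue in favour of phylib. The attach sequence it follows is the stock one for the 2.6.35-era API; below is a condensed sketch, where my_priv, my_adjust_link and the my_mdiobus_* ops are hypothetical placeholders mirroring the calls used in the patch:

static int my_mii_init(struct net_device *dev, struct my_priv *priv)
{
	struct phy_device *phydev;
	int i, err;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -ENOMEM;

	priv->mii_bus->priv  = dev;		/* handed back to the ops below */
	priv->mii_bus->read  = my_mdiobus_read;
	priv->mii_bus->write = my_mdiobus_write;
	priv->mii_bus->name  = "my_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);

	/* No PHY interrupt line on this hardware: poll every PHY address */
	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mii_bus->irq)
		return -ENOMEM;
	for (i = 0; i < PHY_MAX_ADDR; i++)
		priv->mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(priv->mii_bus);
	if (err)
		return err;

	phydev = phy_find_first(priv->mii_bus);	/* first PHY found on the bus */
	if (!phydev)
		return -ENODEV;

	/* phylib now tracks the link and calls my_adjust_link() on any
	 * link/speed/duplex change, replacing the old private timer */
	priv->phydev = phy_connect(dev, dev_name(&phydev->dev),
				   &my_adjust_link, 0, PHY_INTERFACE_MODE_MII);
	return IS_ERR(priv->phydev) ? PTR_ERR(priv->phydev) : 0;
}

Error unwinding of the bus and irq table is omitted for brevity; the patch itself shows the full cleanup chain.
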
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 96b6cfbf0a3a..239d7efdd450 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -88,7 +88,7 @@ static const int multicast_filter_limit = 32;
#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
#define RTL_R8(reg) readb (ioaddr + (reg))
#define RTL_R16(reg) readw (ioaddr + (reg))
-#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+#define RTL_R32(reg) readl (ioaddr + (reg))
enum mac_version {
RTL_GIGA_MAC_NONE = 0x00,
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 156460527231..8ad476a19d95 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
#include "nic.h"
#include "mcdi.h"
+#include "workarounds.h"
/**************************************************************************
*
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
#define EFX_MAX_MTU (9 * 1024)
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -475,7 +469,8 @@ static void efx_init_channels(struct efx_nic *efx)
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+ sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
@@ -515,11 +510,11 @@ static void efx_start_channel(struct efx_channel *channel)
channel->enabled = true;
smp_wmb();
- napi_enable(&channel->napi_str);
-
- /* Load up RX descriptors */
+ /* Fill the queues before enabling NAPI */
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fast_push_rx_descriptors(rx_queue);
+
+ napi_enable(&channel->napi_str);
}
/* This disables event queue processing and packet transmission.
@@ -528,8 +523,6 @@ static void efx_start_channel(struct efx_channel *channel)
*/
static void efx_stop_channel(struct efx_channel *channel)
{
- struct efx_rx_queue *rx_queue;
-
if (!channel->enabled)
return;
@@ -537,12 +530,6 @@ static void efx_stop_channel(struct efx_channel *channel)
channel->enabled = false;
napi_disable(&channel->napi_str);
-
- /* Ensure that any worker threads have exited or will be no-ops */
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- spin_lock_bh(&rx_queue->add_lock);
- spin_unlock_bh(&rx_queue->add_lock);
- }
}
static void efx_fini_channels(struct efx_nic *efx)
@@ -556,10 +543,18 @@ static void efx_fini_channels(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
- if (rc)
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset. */
+ EFX_ERR(efx, "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
EFX_ERR(efx, "failed to flush queues\n");
- else
+ } else {
EFX_LOG(efx, "successfully flushed all queues\n");
+ }
efx_for_each_channel(channel, efx) {
EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
@@ -586,9 +581,9 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_remove_eventq(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
- queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
/**************************************************************************
@@ -1233,15 +1228,8 @@ static void efx_start_all(struct efx_nic *efx)
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
- struct efx_rx_queue *rx_queue;
-
/* Make sure the hardware monitor is stopped */
cancel_delayed_work_sync(&efx->monitor_work);
-
- /* Ensure that all RX slow refills are complete. */
- efx_for_each_rx_queue(rx_queue, efx)
- cancel_delayed_work_sync(&rx_queue->work);
-
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
@@ -1504,11 +1492,11 @@ static int efx_net_stop(struct net_device *net_dev)
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
- struct net_device_stats *stats = &net_dev->stats;
+ struct rtnl_link_stats64 *stats = &net_dev->stats64;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
@@ -1530,11 +1518,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
- stats->rx_over_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
- stats->rx_fifo_errors +
- stats->rx_missed_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
@@ -1645,7 +1630,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
- .ndo_get_stats = efx_net_stats,
+ .ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
@@ -1886,6 +1871,9 @@ static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ if (efx->reset_pending == RESET_TYPE_NONE)
+ return;
+
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
@@ -2052,8 +2040,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
rx_queue->queue = i;
rx_queue->channel = &efx->channel[0]; /* for safety */
rx_queue->buffer = NULL;
- spin_lock_init(&rx_queue->add_lock);
- INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
}
efx->type = type;
@@ -2332,6 +2320,9 @@ static int efx_pm_thaw(struct device *dev)
efx->type->resume_wol(efx);
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
return 0;
}
@@ -2421,11 +2412,6 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
- refill_workqueue = create_workqueue("sfc_refill");
- if (!refill_workqueue) {
- rc = -ENOMEM;
- goto err_refill;
- }
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
@@ -2441,8 +2427,6 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
- destroy_workqueue(refill_workqueue);
- err_refill:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -2454,7 +2438,6 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
- destroy_workqueue(refill_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
}
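
The sfc changes above remove the per-CPU "sfc_refill" workqueue; when an atomic buffer allocation fails, the driver now arms a per-queue timer whose handler simply raises a driver-generated event so the actual refill runs back in NAPI context. The underlying timer pattern (2.6.35-era setup_timer()/mod_timer()/del_timer_sync(), as used by the patch) is sketched below with hypothetical my_* names:

static void my_slow_fill(unsigned long context)
{
	struct my_rx_queue *rxq = (struct my_rx_queue *)context;

	/* Runs in softirq context ~100ms later; kick the real refill */
	my_generate_fill_event(rxq);
}

static void my_rxq_init(struct my_rx_queue *rxq)
{
	/* Bind the callback and the argument it will be passed */
	setup_timer(&rxq->slow_fill, my_slow_fill, (unsigned long)rxq);
}

static void my_schedule_slow_fill(struct my_rx_queue *rxq)
{
	/* Retry later instead of spinning on a failed atomic allocation */
	mod_timer(&rxq->slow_fill, jiffies + msecs_to_jiffies(100));
}

static void my_rxq_fini(struct my_rx_queue *rxq)
{
	/* Wait for a running callback to finish before freeing rxq */
	del_timer_sync(&rxq->slow_fill);
}
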
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304a..e1e448887dfc 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, bool checksummed);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_SIZE 1024
#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697b45b2..8558865ff380 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -548,7 +548,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
- int link_speed;
+ int link_speed, isolate;
+
+ isolate = (efx->reset_pending != RESET_TYPE_NONE);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
@@ -570,7 +572,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
* discarded. */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
- !link_state->up);
+ !link_state->up || isolate);
}
efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +586,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
/* Unisolate the MAC -> RX */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e1f1f8..86e43b1f7689 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "selftest.h"
-struct efx_mcdi_phy_cfg {
+struct efx_mcdi_phy_data {
u32 flags;
u32 type;
u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
};
static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
size_t outlen;
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_data;
+ struct efx_mcdi_phy_data *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
return 0;
@@ -405,7 +406,7 @@ fail:
int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
ethtool_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
/* The link partner capabilities are only relevant if the
@@ -505,7 +506,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
int rc;
@@ -535,7 +536,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
@@ -674,7 +675,7 @@ out:
static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
unsigned flags)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 mode;
int rc;
@@ -712,7 +713,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
if (index == 0)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 4762c91cb587..ba636e086fc3 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,6 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -221,7 +222,6 @@ struct efx_tx_queue {
* If both this and skb are %NULL, the buffer slot is currently free.
* @data: Pointer to ethernet header
* @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
@@ -229,7 +229,24 @@ struct efx_rx_buffer {
struct page *page;
char *data;
unsigned int len;
- dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ * When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ unsigned refcnt;
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
};
/**
@@ -242,10 +259,6 @@ struct efx_rx_buffer {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- * This lock must be held in order to add buffers to the RX
- * descriptor ring (rxd and buffer) and to update added_count (but
- * not removed_count).
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
@@ -259,12 +272,7 @@ struct efx_rx_buffer {
* overflow was observed. It should never be set.
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
- * @buf_page: Page for next RX buffer.
- * We can use a single page for multiple RX buffers. This tracks
- * the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @flushed: Use when handling queue flushing
*/
struct efx_rx_queue {
@@ -277,7 +285,6 @@ struct efx_rx_queue {
int added_count;
int notified_count;
int removed_count;
- spinlock_t add_lock;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int fast_fill_limit;
@@ -285,12 +292,9 @@ struct efx_rx_queue {
unsigned int min_overfill;
unsigned int alloc_page_count;
unsigned int alloc_skb_count;
- struct delayed_work work;
+ struct timer_list slow_fill;
unsigned int slow_fill_count;
- struct page *buf_page;
- dma_addr_t buf_dma_addr;
- char *buf_data;
enum efx_flush_state flushed;
};
@@ -336,7 +340,7 @@ enum efx_rx_alloc_method {
* @eventq: Event queue buffer
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @eventq_magic: Event queue magic value for driver-generated test events
+ * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +371,7 @@ struct efx_channel {
struct efx_special_buffer eventq;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int eventq_magic;
+ unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
@@ -645,6 +649,7 @@ union efx_multicast_hash {
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
* @pci_dev: The PCI device
+ * @port_num: Index of this host port within the controller
* @type: Controller type attributes
* @legacy_irq: IRQ number
* @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -728,6 +733,7 @@ union efx_multicast_hash {
struct efx_nic {
char name[IFNAMSIZ];
struct pci_dev *pci_dev;
+ unsigned port_num;
const struct efx_nic_type *type;
int legacy_irq;
struct workqueue_struct *workqueue;
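
The new struct efx_rx_page_state sits at the very start of every receive page, which is why efx_init_channels() above now adds sizeof(struct efx_rx_page_state) before computing rx_buffer_order: the usable buffer area only begins after that header. A rough layout sketch, assuming a 4 KiB page shared by two buffers (rx_buffer_len <= EFX_RX_HALF_PAGE):

/*
 *  offset 0                                          struct efx_rx_page_state
 *  offset sizeof(state) + EFX_PAGE_IP_ALIGN          buffer 0 data
 *  offset PAGE_SIZE/2 + sizeof(state)
 *                     + EFX_PAGE_IP_ALIGN            buffer 1 data
 *
 * The page is DMA-mapped once; state->refcnt counts how many efx_rx_buffer
 * entries still point into it, and the mapping is only torn down (in
 * efx_unmap_rx_buffer) when that count drops to zero.
 */
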
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec58556..0ee6fd367e6f 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ (0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel) \
+ (0x00010200 + (_channel)->channel)
+
/**************************************************************************
*
* Solarstorm hardware access
@@ -850,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
checksummed, discard);
}
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned code;
+
+ code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+ ++channel->magic_count;
+ else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ else
+ EFX_LOG(efx, "channel %d received generated "
+ "event "EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+}
+
/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -993,11 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
- channel->eventq_magic = EFX_QWORD_FIELD(
- event, FSF_AZ_DRV_GEN_EV_MAGIC);
- EFX_LOG(channel->efx, "channel %d received generated "
- "event "EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(event));
+ efx_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
efx_handle_global_event(channel, &event);
@@ -1088,12 +1112,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
}
-/* Generates a test event on the event queue. A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel)
{
+ unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
efx_qword_t test_event;
EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1219,9 +1251,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
rx_queue->flushed = FLUSH_DONE;
}
- if (EFX_WORKAROUND_7803(efx))
- return 0;
-
return -ETIMEDOUT;
}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c2f843..95770e15115d 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -190,8 +190,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel,
- unsigned int magic);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
extern void efx_nic_generate_interrupt(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f55..9fb698e3519d 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
@@ -98,155 +101,138 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
return PAGE_SIZE << efx->rx_buffer_order;
}
-
/**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having only inserted fewer than EFX_RX_BATCH
+ * buffers.
*/
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
+ struct efx_rx_buffer *rx_buf;
int skb_len = efx->rx_buffer_len;
+ unsigned index, count;
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
- return -ENOMEM;
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
- /* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!rx_buf->skb))
+ return -ENOMEM;
+ rx_buf->page = NULL;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
+ /* Adjust the SKB for padding and checksum */
+ skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ rx_buf->len = skb_len - NET_IP_ALIGN;
+ rx_buf->data = (char *)rx_buf->skb->data;
+ rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->data, rx_buf->len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+ rx_buf->dma_addr))) {
+ dev_kfree_skb_any(rx_buf->skb);
+ rx_buf->skb = NULL;
+ return -EIO;
+ }
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
- return -EIO;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_skb_count;
}
return 0;
}
/**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates struct efx_rx_buffers for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- int bytes, space, offset;
-
- bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-
- /* If there is space left in the previously allocated page,
- * then use it. Otherwise allocate a new one */
- rx_buf->page = rx_queue->buf_page;
- if (rx_buf->page == NULL) {
- dma_addr_t dma_addr;
-
- rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(rx_buf->page == NULL))
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ /* We can split a page between two buffers */
+ BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
return -ENOMEM;
-
- dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
- 0, efx_rx_buf_size(efx),
+ dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
-
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
+ __free_pages(page, efx->rx_buffer_order);
return -EIO;
}
-
- rx_queue->buf_page = rx_buf->page;
- rx_queue->buf_dma_addr = dma_addr;
- rx_queue->buf_data = (page_address(rx_buf->page) +
- EFX_PAGE_IP_ALIGN);
- }
-
- rx_buf->len = bytes;
- rx_buf->data = rx_queue->buf_data;
- offset = efx_rx_buf_offset(rx_buf);
- rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
-
- /* Try to pack multiple buffers per page */
- if (efx->rx_buffer_order == 0) {
- /* The next buffer starts on the next 512 byte boundary */
- rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
- offset += ((bytes + 0x1ff) & ~0x1ff);
-
- space = efx_rx_buf_size(efx) - offset;
- if (space >= bytes) {
- /* Refs dropped on kernel releasing each skb */
- get_page(rx_queue->buf_page);
- goto out;
+ page_addr = page_address(page);
+ state = page_addr;
+ state->refcnt = 0;
+ state->dma_addr = dma_addr;
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->skb = NULL;
+ rx_buf->page = page;
+ rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_page_count;
+ ++state->refcnt;
+
+ if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+ /* Use the second half of the page */
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
}
}
- /* This is the final RX buffer for this page, so mark it for
- * unmapping */
- rx_queue->buf_page = NULL;
- rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
return 0;
}
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *new_rx_buf)
-{
- int rc = 0;
-
- if (rx_queue->channel->rx_alloc_push_pages) {
- new_rx_buf->skb = NULL;
- rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
- rx_queue->alloc_page_count++;
- } else {
- new_rx_buf->page = NULL;
- rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
- rx_queue->alloc_skb_count++;
- }
-
- if (unlikely(rc < 0))
- EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
- rx_queue->queue, rc);
- return rc;
-}
-
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
+ struct efx_rx_page_state *state;
+
EFX_BUG_ON_PARANOID(rx_buf->skb);
- if (rx_buf->unmap_addr) {
- pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+ state = page_address(rx_buf->page);
+ if (--state->refcnt == 0) {
+ pci_unmap_page(efx->pci_dev,
+ state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- rx_buf->unmap_addr = 0;
}
} else if (likely(rx_buf->skb)) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -273,31 +259,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_buffer *new_buf;
+ unsigned fill_level, index;
+
+ /* +1 because efx_rx_packet() incremented removed_count. +1 because
+ * we'd like to insert an additional descriptor whilst leaving
+ * EFX_RXD_HEAD_ROOM for the non-recycle path */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+ if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+ /* We could place "state" on a list, and drain the list in
+ * efx_fast_push_rx_descriptors(). For now, this will do. */
+ return;
+ }
+
+ ++state->refcnt;
+ get_page(rx_buf->page);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+ new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->skb = NULL;
+ new_buf->page = rx_buf->page;
+ new_buf->data = (void *)
+ ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->len = rx_buf->len;
+ ++rx_queue->added_count;
+}
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+ struct efx_rx_buffer *new_buf;
+ unsigned index;
+
+ if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->page) == 1)
+ efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+
+ memcpy(new_buf, rx_buf, sizeof(*new_buf));
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
+ ++rx_queue->added_count;
+}
+
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
- * @retry: Recheck the fill level
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
*/
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
- int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_rx_buffer *rx_buf;
- unsigned fill_level, index;
- int i, space, rc = 0;
+ struct efx_channel *channel = rx_queue->channel;
+ unsigned fill_level;
+ int space, rc = 0;
- /* Calculate current fill level. Do this outside the lock,
- * because most of the time we'll end up not wanting to do the
- * fill anyway.
- */
+ /* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
- /* Don't fill if we don't need to */
if (fill_level >= rx_queue->fast_fill_trigger)
- return 0;
+ goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,34 +344,25 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
rx_queue->min_fill = fill_level;
}
- /* Acquire RX add lock. If this lock is contended, then a fast
- * fill must already be in progress (e.g. in the refill
- * tasklet), so we don't need to do anything
- */
- if (!spin_trylock_bh(&rx_queue->add_lock))
- return -1;
-
- retry:
- /* Recalculate current fill level now that we have the lock */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
space = rx_queue->fast_fill_limit - fill_level;
if (space < EFX_RX_BATCH)
- goto out_unlock;
+ goto out;
EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
" level %d to level %d using %s allocation\n",
rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
- rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+ channel->rx_alloc_push_pages ? "page" : "skb");
do {
- for (i = 0; i < EFX_RX_BATCH; ++i) {
- index = rx_queue->added_count & EFX_RXQ_MASK;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rc = efx_init_rx_buffer(rx_queue, rx_buf);
- if (unlikely(rc))
- goto out;
- ++rx_queue->added_count;
+ if (channel->rx_alloc_push_pages)
+ rc = efx_init_rx_buffers_page(rx_queue);
+ else
+ rc = efx_init_rx_buffers_skb(rx_queue);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
}
} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
@@ -341,63 +371,18 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
rx_queue->added_count - rx_queue->removed_count);
out:
- /* Send write pointer to card. */
- efx_nic_notify_rx_desc(rx_queue);
-
- /* If the fast fill is running inside from the refill tasklet, then
- * for SMP systems it may be running on a different CPU to
- * RX event processing, which means that the fill level may now be
- * out of date. */
- if (unlikely(retry && (rc == 0)))
- goto retry;
-
- out_unlock:
- spin_unlock_bh(&rx_queue->add_lock);
-
- return rc;
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
- int rc;
-
- rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
- if (unlikely(rc)) {
- /* Schedule the work item to run immediately. The hope is
- * that work is immediately pending to free some memory
- * (e.g. an RX event or TX completion)
- */
- efx_schedule_slow_fill(rx_queue, 0);
- }
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
}
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
{
- struct efx_rx_queue *rx_queue;
- int rc;
-
- rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
- if (unlikely(!rx_queue->channel->enabled))
- return;
-
- EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
- "%d\n", rx_queue->queue, raw_smp_processor_id());
+ struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+ struct efx_channel *channel = rx_queue->channel;
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(channel);
++rx_queue->slow_fill_count;
- /* Push new RX descriptors, allowing at least 1 jiffy for
- * the kernel to free some more memory. */
- rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
- if (rc)
- efx_schedule_slow_fill(rx_queue, 1);
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -498,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = rx_queue->channel;
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
@@ -525,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Discard packet, if instructed to do so */
if (unlikely(discard)) {
if (unlikely(leak_packet))
- rx_queue->channel->n_skbuff_leaks++;
+ channel->n_skbuff_leaks++;
else
- /* We haven't called efx_unmap_rx_buffer yet,
- * so fini the entire rx_buffer here */
- efx_fini_rx_buffer(rx_queue, rx_buf);
- return;
+ efx_recycle_rx_buffer(channel, rx_buf);
+
+ /* Don't hold off the previous receive */
+ rx_buf = NULL;
+ goto out;
}
/* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
* prefetched into cache.
*/
rx_buf->len = len;
+out:
if (rx_queue->channel->rx_pkt)
__efx_rx_packet(rx_queue->channel,
rx_queue->channel->rx_pkt,
@@ -682,6 +670,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+ del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,16 +680,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
-
- /* For a page that is part-way through splitting into RX buffers */
- if (rx_queue->buf_page != NULL) {
- pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
- efx_rx_buf_size(rx_queue->efx),
- PCI_DMA_FROMDEVICE);
- __free_pages(rx_queue->buf_page,
- rx_queue->efx->rx_buffer_order);
- rx_queue->buf_page = NULL;
- }
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
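
efx_recycle_rx_buffer() and efx_resurrect_rx_buffer() above rely on the two buffers in a shared page sitting exactly PAGE_SIZE/2 apart, so the partner buffer's CPU and DMA addresses can both be recovered with a single XOR. A tiny stand-alone illustration of that arithmetic (hypothetical addresses, compiles in userspace):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Pretend the page starts here (page-aligned) and the first
	 * buffer's data begins a little way into the lower half */
	uintptr_t page = 0x10000000UL;
	uintptr_t buf0 = page + 16;			/* lower half */
	uintptr_t buf1 = buf0 ^ (PAGE_SIZE >> 1);	/* flip into upper half */

	assert(buf1 == page + (PAGE_SIZE >> 1) + 16);
	assert((buf1 ^ (PAGE_SIZE >> 1)) == buf0);	/* ...and back again */
	return 0;
}
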
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86cc090f..1f83404af63b 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@ struct efx_loopback_payload {
struct udphdr udp;
__be16 iteration;
const char msg[64];
-} __attribute__ ((packed));
+} __packed;
/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
@@ -161,23 +161,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
- unsigned int magic, count;
-
- /* Channel specific code, limited to 20 bits */
- magic = (0x00010150 + channel->channel);
- EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
- channel->channel, magic);
+ unsigned int magic_count, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- /* Reset flag and zero magic word */
+ magic_count = channel->magic_count;
channel->efx->last_irq_cpu = -1;
- channel->eventq_magic = 0;
smp_wmb();
- efx_nic_generate_test_event(channel, magic);
+ efx_nic_generate_test_event(channel);
/* Wait for arrival of interrupt */
count = 0;
@@ -187,7 +181,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
if (channel->work_pending)
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic)
+ if (channel->magic_count != magic_count)
goto eventq_ok;
} while (++count < 2);
@@ -204,7 +198,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
/* Check to see if event was received even if interrupt wasn't */
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic) {
+ if (channel->magic_count != magic_count) {
EFX_ERR(channel->efx, "channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
tests->eventq_dma[channel->channel] = 1;
@@ -545,7 +539,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
- int count;
+ int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
@@ -567,8 +561,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
link_up = !efx->mac_op->check_fault(efx);
mutex_unlock(&efx->mac_lock);
- if (link_up)
- return 0;
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
}
return -ETIMEDOUT;
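The loop above now requires two consecutive successful link checks before efx_wait_for_link() reports success, so one good poll immediately after a fault cannot end the wait early. The same debounce pattern as a standalone sketch; check_link(), the poll interval and the counts are stand-ins, not driver code:

    #include <stdbool.h>
    #include <unistd.h>

    static bool check_link(void);            /* stand-in for the MAC fault check */

    static int wait_for_stable_link(int max_polls, int needed_in_a_row)
    {
            int up_count = 0;

            while (max_polls-- > 0) {
                    if (check_link()) {
                            if (++up_count == needed_in_a_row)
                                    return 0;        /* link considered stable */
                    } else {
                            up_count = 0;            /* any drop restarts the count */
                    }
                    usleep(100 * 1000);              /* 100 ms between polls */
            }
            return -1;                               /* timed out */
    }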
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc91473..782e45a613d6 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
-#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 084eff21b67a..61891a6cacc2 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2161,21 +2161,21 @@ struct sky2_tx_le {
__le16 length; /* also vlan tag or checksum start */
u8 ctrl;
u8 opcode;
-} __attribute((packed));
+} __packed;
struct sky2_rx_le {
__le32 addr;
__le16 length;
u8 ctrl;
u8 opcode;
-} __attribute((packed));
+} __packed;
struct sky2_status_le {
__le32 status; /* also checksum */
__le16 length; /* also vlan tag */
u8 css;
u8 opcode;
-} __attribute((packed));
+} __packed;
struct tx_ring_info {
struct sk_buff *skb;
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index cff98d07cba8..67e3b71bf705 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -334,7 +334,7 @@ struct txd_desc {
u32 va_lo;
u32 va_hi;
struct pbl pbl[0]; /* Fragments */
-} __attribute__ ((packed));
+} __packed;
/* Register region size */
#define BDX_REGS_SIZE 0x1000
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 573054ae7b58..289cdc5fde92 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define DRV_MODULE_VERSION "3.110"
-#define DRV_MODULE_RELDATE "April 9, 2010"
+#define DRV_MODULE_VERSION "3.111"
+#define DRV_MODULE_RELDATE "June 5, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -145,8 +145,6 @@
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
-#define TG3_RSS_MIN_NUM_MSIX_VECS 2
-
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
* that are at least dword aligned when used in PCIX mode. The driver
* works around this bug by double copying the packet. This workaround
@@ -272,6 +270,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -585,18 +584,23 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
static void tg3_ape_lock_init(struct tg3 *tp)
{
int i;
+ u32 regbase;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ regbase = TG3_APE_LOCK_GRANT;
+ else
+ regbase = TG3_APE_PER_LOCK_GRANT;
/* Make sure the driver hasn't any stale locks. */
for (i = 0; i < 8; i++)
- tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
- APE_LOCK_GRANT_DRIVER);
+ tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
int i, off;
int ret = 0;
- u32 status;
+ u32 status, req, gnt;
if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
return 0;
@@ -609,13 +613,21 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return -EINVAL;
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ req = TG3_APE_LOCK_REQ;
+ gnt = TG3_APE_LOCK_GRANT;
+ } else {
+ req = TG3_APE_PER_LOCK_REQ;
+ gnt = TG3_APE_PER_LOCK_GRANT;
+ }
+
off = 4 * locknum;
- tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
+ tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
/* Wait for up to 1 millisecond to acquire lock. */
for (i = 0; i < 100; i++) {
- status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
+ status = tg3_ape_read32(tp, gnt + off);
if (status == APE_LOCK_GRANT_DRIVER)
break;
udelay(10);
@@ -623,7 +635,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
if (status != APE_LOCK_GRANT_DRIVER) {
/* Revoke the lock request. */
- tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
+ tg3_ape_write32(tp, gnt + off,
APE_LOCK_GRANT_DRIVER);
ret = -EBUSY;
@@ -634,7 +646,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
- int off;
+ u32 gnt;
if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
return;
@@ -647,8 +659,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
}
- off = 4 * locknum;
- tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ gnt = TG3_APE_LOCK_GRANT;
+ else
+ gnt = TG3_APE_PER_LOCK_GRANT;
+
+ tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
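The hunks above only parameterise which register pair the APE lock helpers use (ASIC_REV_5761 keeps TG3_APE_LOCK_REQ/GRANT, newer parts use the new PER_LOCK pair); the handshake itself is unchanged: write a driver token into the request register, poll the grant register for up to roughly 1 ms, and write the token back to the grant register to revoke the request on timeout (or, later, to release the lock). A generic sketch of that request/grant semaphore; reg_read32(), reg_write32(), delay_us() and the token value are hypothetical stand-ins:

    #include <stdint.h>

    #define LOCK_DRIVER_TOKEN 0x00000002u            /* illustrative value */

    extern uint32_t reg_read32(uint32_t off);
    extern void reg_write32(uint32_t off, uint32_t val);
    extern void delay_us(unsigned int us);

    static int ape_style_lock(uint32_t req_off, uint32_t gnt_off)
    {
            int i;

            reg_write32(req_off, LOCK_DRIVER_TOKEN);

            for (i = 0; i < 100; i++) {              /* 100 x 10 us = ~1 ms */
                    if (reg_read32(gnt_off) == LOCK_DRIVER_TOKEN)
                            return 0;                /* lock acquired */
                    delay_us(10);
            }

            /* Timed out: withdraw the request so the arbiter isn't wedged. */
            reg_write32(gnt_off, LOCK_DRIVER_TOKEN);
            return -1;
    }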
@@ -1069,14 +1085,11 @@ static int tg3_mdio_init(struct tg3 *tp)
u32 reg;
struct phy_device *phydev;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- u32 funcnum, is_serdes;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ u32 is_serdes;
- funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
- if (funcnum)
- tp->phy_addr = 2;
- else
- tp->phy_addr = 1;
+ tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
@@ -1589,7 +1602,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
u32 reg;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
@@ -1964,7 +1978,8 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
return 0;
@@ -2049,6 +2064,7 @@ static void tg3_frob_aux_power(struct tg3 *tp)
/* The GPIOs do something completely different on 57765. */
if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
@@ -4191,6 +4207,8 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
+ } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ /* Link is up via parallel detect */
} else {
current_link_up = 0;
}
@@ -6212,6 +6230,8 @@ static void tg3_free_rings(struct tg3 *tp)
for (j = 0; j < tp->irq_cnt; j++) {
struct tg3_napi *tnapi = &tp->napi[j];
+ tg3_rx_prodring_free(tp, &tp->prodring[j]);
+
if (!tnapi->tx_buffers)
continue;
@@ -6247,8 +6267,6 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
-
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
}
}
@@ -6782,7 +6800,8 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Allow reads and writes to the APE register and memory space. */
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
- PCISTATE_ALLOW_APE_SHMEM_WR;
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
@@ -7069,6 +7088,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
val = tr32(0x7c00);
@@ -7504,7 +7524,8 @@ static void tg3_rings_reset(struct tg3 *tp)
/* Disable all receive return rings but the first. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
@@ -7720,7 +7741,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
*/
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
- PCISTATE_ALLOW_APE_SHMEM_WR;
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
tw32(TG3PCI_PCISTATE, val);
}
@@ -7741,6 +7763,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
@@ -7869,7 +7892,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
@@ -7894,7 +7918,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
(RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
BDINFO_FLAGS_USE_EXT_RECV);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -7903,6 +7928,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
(TG3_RX_STD_DMA_SZ << 2);
@@ -7921,6 +7947,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
tw32(STD_REPLENISH_LWM, 32);
tw32(JMB_REPLENISH_LWM, 16);
@@ -7956,7 +7983,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8195,6 +8223,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
tp->tx_mode = TX_MODE_ENABLE;
+ if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
tw32_f(MAC_TX_MODE, tp->tx_mode);
udelay(100);
@@ -8206,7 +8237,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
int idx = i % sizeof(val);
- ent[idx] = i % (tp->irq_cnt - 1);
+ ent[idx] = (i % (tp->irq_cnt - 1)) + 1;
if (idx == sizeof(val) - 1) {
tw32(reg, val);
reg += 4;
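The one-line change above shifts every RSS indirection-table entry up by one, so hash buckets now point at return rings 1..(irq_cnt - 1) rather than 0..(irq_cnt - 2). A tiny standalone illustration of the resulting mapping; TBL_SIZE and nvecs are made-up names and values, not driver symbols:

    #include <stdio.h>

    #define TBL_SIZE 128

    int main(void)
    {
            unsigned int nvecs = 5;                  /* e.g. 1 non-RSS vector + 4 RSS rings */
            unsigned int tbl[TBL_SIZE];
            unsigned int i;

            for (i = 0; i < TBL_SIZE; i++)
                    tbl[i] = (i % (nvecs - 1)) + 1;  /* rings 1..4, never ring 0 */

            for (i = 0; i < 8; i++)
                    printf("bucket %u -> ring %u\n", i, tbl[i]);
            return 0;
    }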
@@ -8511,8 +8542,10 @@ static void tg3_timer(unsigned long __opaque)
}
tg3_setup_phy(tp, 0);
}
- } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
+ }
tp->timer_counter = tp->timer_multiplier;
}
@@ -8606,6 +8639,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
* observable way to know whether the interrupt was delivered.
*/
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8650,6 +8684,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
if (intr_ok) {
/* Reenable MSI one shot mode. */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8775,9 +8810,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
}
rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
- if (rc != 0) {
- if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
- return false;
+ if (rc < 0) {
+ return false;
+ } else if (rc != 0) {
if (pci_enable_msix(tp->pdev, msix_ent, rc))
return false;
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
@@ -8785,16 +8820,19 @@ static bool tg3_enable_msix(struct tg3 *tp)
tp->irq_cnt = rc;
}
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
-
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].irq_vec = msix_ent[i].vector;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
- tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
- } else
- tp->dev->real_num_tx_queues = 1;
+ tp->dev->real_num_tx_queues = 1;
+ if (tp->irq_cnt > 1) {
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+ tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+ }
+ }
return true;
}
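The rework above follows the pci_enable_msix() contract of this kernel: a negative return is a hard failure, zero means all requested vectors were granted, and a positive return reports how many vectors are actually available, in which case the driver retries with that smaller count and only then decides whether RSS (and, on 5717/5719, TSS) can be enabled. A compact model of that fallback; try_enable() is a hypothetical stand-in for pci_enable_msix():

    /* <0 = error, 0 = success, >0 = number of vectors actually available */
    static int try_enable(int requested);

    static int enable_with_fallback(int wanted, int *granted)
    {
            int rc = try_enable(wanted);

            if (rc < 0)
                    return rc;                       /* fall back to MSI/INTx */
            if (rc > 0) {
                    /* Fewer vectors available: retry with exactly that many. */
                    if (try_enable(rc) != 0)
                            return -1;
                    wanted = rc;
            }
            *granted = wanted;
            return 0;
    }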
@@ -8943,6 +8981,7 @@ static int tg3_open(struct net_device *dev)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
(tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
(tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
@@ -10554,7 +10593,8 @@ static int tg3_test_memory(struct tg3 *tp)
int err = 0;
int i;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
mem_tbl = mem_tbl_5717;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
mem_tbl = mem_tbl_57765;
@@ -11634,7 +11674,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tg3_get_57780_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -12070,11 +12111,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
- tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
- else
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+ else
+ tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
}
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12804,7 +12844,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN2_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -12970,6 +13011,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
@@ -12999,6 +13041,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Determine TSO capabilities */
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
@@ -13036,6 +13079,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
tp->irq_max = TG3_IRQ_MAX_VECS;
@@ -13043,6 +13087,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
@@ -13051,6 +13096,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
@@ -13242,7 +13288,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
* APE register and memory space.
*/
pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
- PCISTATE_ALLOW_APE_SHMEM_WR;
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
pci_state_reg);
}
@@ -13252,6 +13299,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
@@ -13332,6 +13380,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -13580,9 +13629,12 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
else
tg3_nvram_unlock(tp);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (PCI_FUNC(tp->pdev->devfn) & 1)
mac_offset = 0xcc;
+ if (PCI_FUNC(tp->pdev->devfn) > 1)
+ mac_offset += 0x18c;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mac_offset = 0x10;
@@ -13668,6 +13720,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
#endif
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
goto out;
@@ -13880,6 +13933,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
goto out;
@@ -14079,6 +14133,7 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
@@ -14156,6 +14211,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
case TG3_PHY_ID_BCM5718C: return "5718C";
case TG3_PHY_ID_BCM5718S: return "5718S";
case TG3_PHY_ID_BCM57765: return "57765";
+ case TG3_PHY_ID_BCM5719C: return "5719C";
case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -14404,7 +14460,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
}
if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
dev->netdev_ops = &tg3_netdev_ops;
else
dev->netdev_ops = &tg3_netdev_ops_dma_bug;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ce9c4918c318..6b6af7698b38 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -53,6 +53,7 @@
#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
+#define TG3PCI_DEVICE_TIGON3_5719 0x1657
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -160,6 +161,7 @@
#define ASIC_REV_57780 0x57780
#define ASIC_REV_5717 0x5717
#define ASIC_REV_57765 0x57785
+#define ASIC_REV_5719 0x5719
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -231,6 +233,7 @@
#define PCISTATE_RETRY_SAME_DMA 0x00002000
#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000
#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000
+#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000
#define TG3PCI_CLOCK_CTRL 0x00000074
#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200
#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400
@@ -468,6 +471,7 @@
#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010
#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
+#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
#define MAC_TX_STATUS 0x00000460
#define TX_STATUS_XOFFED 0x00000001
#define TX_STATUS_SENT_XOFF 0x00000002
@@ -1071,10 +1075,8 @@
#define TG3_CPMU_HST_ACC 0x0000361c
#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
-/* 0x3620 --> 0x362c unused */
+/* 0x3620 --> 0x3630 unused */
-#define TG3_CPMU_STATUS 0x0000362c
-#define TG3_CPMU_STATUS_PCIE_FUNC 0x20000000
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -2209,6 +2211,11 @@
#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000
#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
+#define TG3_APE_PER_LOCK_REQ 0x8400
+#define APE_LOCK_PER_REQ_DRIVER 0x00001000
+#define TG3_APE_PER_LOCK_GRANT 0x8420
+#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
+
/* APE convenience enumerations. */
#define TG3_APE_LOCK_GRC 1
#define TG3_APE_LOCK_MEM 4
@@ -2942,6 +2949,7 @@ struct tg3 {
#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
#define TG3_PHY_ID_BCM5718S 0xbc050ff0
#define TG3_PHY_ID_BCM57765 0x5c0d8a40
+#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
#define TG3_PHY_ID_BCM5906 0xdc00ac40
#define TG3_PHY_ID_BCM8002 0x60010140
#define TG3_PHY_ID_INVALID 0xffffffff
@@ -2965,7 +2973,8 @@ struct tg3 {
(X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
(X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
(X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
- (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002)
+ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+ (X) == TG3_PHY_ID_BCM8002)
u32 led_ctrl;
u32 phy_otp;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c0e70006374e..960962660079 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -262,13 +262,13 @@ struct de_srom_media_block {
u16 csr13;
u16 csr14;
u16 csr15;
-} __attribute__((packed));
+} __packed;
struct de_srom_info_leaf {
u16 default_media;
u8 n_blocks;
u8 unused;
-} __attribute__((packed));
+} __packed;
struct de_desc {
__le32 opts1;
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 6002e651b9ea..3031ed9c4a1a 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -120,8 +120,8 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
0x00, 0x06 /* ttm bit map */
};
- tp->mtable = (struct mediatable *)
- kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL);
+ tp->mtable = kmalloc(sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
if (tp->mtable == NULL)
return; /* Horrible, impossible failure. */
@@ -227,9 +227,9 @@ subsequent_board:
return;
}
- mtable = (struct mediatable *)
- kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
- GFP_KERNEL);
+ mtable = kmalloc(sizeof(struct mediatable) +
+ count * sizeof(struct medialeaf),
+ GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 0afa2d4f9472..e525875ed67d 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -51,22 +52,23 @@ struct tulip_chip_table {
enum tbl_flag {
- HAS_MII = 0x0001,
- HAS_MEDIA_TABLE = 0x0002,
- CSR12_IN_SROM = 0x0004,
- ALWAYS_CHECK_MII = 0x0008,
- HAS_ACPI = 0x0010,
- MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */
- HAS_PNICNWAY = 0x0080,
- HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */
- HAS_INTR_MITIGATION = 0x0100,
- IS_ASIX = 0x0200,
- HAS_8023X = 0x0400,
- COMET_MAC_ADDR = 0x0800,
- HAS_PCI_MWI = 0x1000,
- HAS_PHY_IRQ = 0x2000,
- HAS_SWAPPED_SEEPROM = 0x4000,
- NEEDS_FAKE_MEDIA_TABLE = 0x8000,
+ HAS_MII = 0x00001,
+ HAS_MEDIA_TABLE = 0x00002,
+ CSR12_IN_SROM = 0x00004,
+ ALWAYS_CHECK_MII = 0x00008,
+ HAS_ACPI = 0x00010,
+ MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */
+ HAS_PNICNWAY = 0x00080,
+ HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION = 0x00100,
+ IS_ASIX = 0x00200,
+ HAS_8023X = 0x00400,
+ COMET_MAC_ADDR = 0x00800,
+ HAS_PCI_MWI = 0x01000,
+ HAS_PHY_IRQ = 0x02000,
+ HAS_SWAPPED_SEEPROM = 0x04000,
+ NEEDS_FAKE_MEDIA_TABLE = 0x08000,
+ COMET_PM = 0x10000,
};
@@ -120,6 +122,11 @@ enum tulip_offsets {
CSR13 = 0x68,
CSR14 = 0x70,
CSR15 = 0x78,
+ CSR18 = 0x88,
+ CSR19 = 0x8c,
+ CSR20 = 0x90,
+ CSR27 = 0xAC,
+ CSR28 = 0xB0,
};
/* register offset and bits for CFDD PCI config reg */
@@ -289,6 +296,30 @@ enum t21143_csr6_bits {
csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
};
+enum tulip_comet_csr13_bits {
+/* LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they
+ * determine which link-status transition triggers a wake-up when
+ * LSCE is enabled */
+ comet_csr13_linkoffe = (1 << 17),
+ comet_csr13_linkone = (1 << 16),
+ comet_csr13_wfre = (1 << 10),
+ comet_csr13_mpre = (1 << 9),
+ comet_csr13_lsce = (1 << 8),
+ comet_csr13_wfr = (1 << 2),
+ comet_csr13_mpr = (1 << 1),
+ comet_csr13_lsc = (1 << 0),
+};
+
+enum tulip_comet_csr18_bits {
+ comet_csr18_pmes_sticky = (1 << 24),
+ comet_csr18_pm_mode = (1 << 19),
+ comet_csr18_apm_mode = (1 << 18),
+ comet_csr18_d3a = (1 << 7)
+};
+
+enum tulip_comet_csr20_bits {
+ comet_csr20_pmes = (1 << 15),
+};
/* Keep the ring sizes a power of two for efficiency.
Making the Tx ring too large decreases the effectiveness of channel
@@ -411,6 +442,7 @@ struct tulip_private {
unsigned int csr6; /* Current CSR6 control settings. */
unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
void (*link_change) (struct net_device * dev, int csr5);
+ struct ethtool_wolinfo wolinfo; /* WOL settings */
u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
u16 lpar; /* 21143 Link partner ability. */
u16 advertising[4];
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 254643ed945e..03e96b928c04 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -30,7 +30,6 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>
-#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
@@ -272,6 +271,7 @@ static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
+static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
@@ -309,6 +309,11 @@ static void tulip_up(struct net_device *dev)
/* Wake the chip from sleep/snooze mode. */
tulip_set_power_state (tp, 0, 0);
+ /* Disable all WOL events */
+ pci_enable_wake(tp->pdev, PCI_D3hot, 0);
+ pci_enable_wake(tp->pdev, PCI_D3cold, 0);
+ tulip_set_wolopts(tp->pdev, 0);
+
/* On some chip revs we must set the MII/SYM port before the reset!? */
if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
iowrite32(0x00040000, ioaddr + CSR6);
@@ -345,8 +350,8 @@ static void tulip_up(struct net_device *dev)
} else if (tp->flags & COMET_MAC_ADDR) {
iowrite32(addr_low, ioaddr + 0xA4);
iowrite32(addr_high, ioaddr + 0xA8);
- iowrite32(0, ioaddr + 0xAC);
- iowrite32(0, ioaddr + 0xB0);
+ iowrite32(0, ioaddr + CSR27);
+ iowrite32(0, ioaddr + CSR28);
}
} else {
/* This is set_rx_mode(), but without starting the transmitter. */
@@ -876,8 +881,35 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
strcpy(info->bus_info, pci_name(np->pdev));
}
+
+static int tulip_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+
+ if (wolinfo->wolopts & (~tp->wolinfo.supported))
+ return -EOPNOTSUPP;
+
+ tp->wolinfo.wolopts = wolinfo->wolopts;
+ device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
+ return 0;
+}
+
+static void tulip_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct tulip_private *tp = netdev_priv(dev);
+
+ wolinfo->supported = tp->wolinfo.supported;
+ wolinfo->wolopts = tp->wolinfo.wolopts;
+ return;
+}
+
+
static const struct ethtool_ops ops = {
- .get_drvinfo = tulip_get_drvinfo
+ .get_drvinfo = tulip_get_drvinfo,
+ .set_wol = tulip_ethtool_set_wol,
+ .get_wol = tulip_ethtool_get_wol,
};
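With get_wol/set_wol added to the ethtool_ops, the new hooks are reached through the standard SIOCETHTOOL ioctl, which is what the ethtool utility issues for "ethtool -s ethX wol g". A minimal userspace sketch; the interface name "eth0" is only an example:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&wol;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)        /* calls .get_wol */
                    printf("supported 0x%x, enabled 0x%x\n",
                           wol.supported, wol.wolopts);

            wol.cmd = ETHTOOL_SWOL;
            wol.wolopts = WAKE_MAGIC;                     /* magic-packet wake */
            if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)        /* calls .set_wol */
                    perror("ETHTOOL_SWOL");
            return 0;
    }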
/* Provide ioctl() calls to examine the MII xcvr state. */
@@ -1093,8 +1125,8 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(3, ioaddr + CSR13);
iowrite32(mc_filter[1], ioaddr + CSR14);
} else if (tp->flags & COMET_MAC_ADDR) {
- iowrite32(mc_filter[0], ioaddr + 0xAC);
- iowrite32(mc_filter[1], ioaddr + 0xB0);
+ iowrite32(mc_filter[0], ioaddr + CSR27);
+ iowrite32(mc_filter[1], ioaddr + CSR28);
}
tp->mc_filter[0] = mc_filter[0];
tp->mc_filter[1] = mc_filter[1];
@@ -1381,6 +1413,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
return i;
}
+ /* The chip will fail to enter a low-power state later unless
+ * first explicitly commanded into D0 */
+ if (pci_set_power_state(pdev, PCI_D0)) {
+ printk (KERN_NOTICE PFX
+ "Failed to set power state to D0\n");
+ }
+
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
@@ -1427,6 +1466,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->chip_id = chip_idx;
tp->flags = tulip_tbl[chip_idx].flags;
+
+ tp->wolinfo.supported = 0;
+ tp->wolinfo.wolopts = 0;
+ /* COMET: Enable power management only for AN983B */
+ if (chip_idx == COMET ) {
+ u32 sig;
+ pci_read_config_dword (pdev, 0x80, &sig);
+ if (sig == 0x09811317) {
+ tp->flags |= COMET_PM;
+ tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
+ printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n");
+ }
+ }
tp->pdev = pdev;
tp->base_addr = ioaddr;
tp->revision = pdev->revision;
@@ -1759,11 +1811,43 @@ err_out_free_netdev:
}
+/* set the registers according to the given wolopts */
+static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
+
+ if (tp->flags & COMET_PM) {
+
+ unsigned int tmp;
+
+ tmp = ioread32(ioaddr + CSR18);
+ tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
+ tmp |= comet_csr18_pm_mode;
+ iowrite32(tmp, ioaddr + CSR18);
+
+ /* Set the Wake-up Control/Status Register to the given WOL options */
+ tmp = ioread32(ioaddr + CSR13);
+ tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
+ if (wolopts & WAKE_MAGIC)
+ tmp |= comet_csr13_mpre;
+ if (wolopts & WAKE_PHY)
+ tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
+ /* Clear the event flags */
+ tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
+ iowrite32(tmp, ioaddr + CSR13);
+ }
+}
+
#ifdef CONFIG_PM
+
static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
{
+ pci_power_t pstate;
struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
if (!dev)
return -EINVAL;
@@ -1779,7 +1863,16 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
save_state:
pci_save_state(pdev);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pstate = pci_choose_state(pdev, state);
+ if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
+ int rc;
+
+ tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
+ rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
+ if (rc)
+ printk("tulip: pci_enable_wake failed (%d)\n", rc);
+ }
+ pci_set_power_state(pdev, pstate);
return 0;
}
@@ -1788,7 +1881,10 @@ save_state:
static int tulip_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct tulip_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->base_addr;
int retval;
+ unsigned int tmp;
if (!dev)
return -EINVAL;
@@ -1809,6 +1905,18 @@ static int tulip_resume(struct pci_dev *pdev)
return retval;
}
+ if (tp->flags & COMET_PM) {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /* Clear the PMES flag */
+ tmp = ioread32(ioaddr + CSR20);
+ tmp |= comet_csr20_pmes;
+ iowrite32(tmp, ioaddr + CSR20);
+
+ /* Disable all wake-up events */
+ tulip_set_wolopts(pdev, 0);
+ }
netif_device_attach(dev);
if (netif_running(dev))
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 22bde49262c0..2e50077ff450 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -255,7 +255,7 @@ struct typhoon_shared {
struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
u32 zeroWord;
struct tx_desc txHi[TXHI_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
struct rxbuff_ent {
struct sk_buff *skb;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index 673fd5125914..88187fc84aa3 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -77,7 +77,7 @@ struct typhoon_indexes {
volatile __le32 cmdCleared;
volatile __le32 respReady;
volatile __le32 rxHiReady;
-} __attribute__ ((packed));
+} __packed;
/* The host<->Typhoon interface
* Our means of communicating where things are
@@ -125,7 +125,7 @@ struct typhoon_interface {
__le32 rxHiAddr;
__le32 rxHiAddrHi;
__le32 rxHiSize;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon transmit/fragment descriptor
*
@@ -187,7 +187,7 @@ struct tx_desc {
#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
-} __attribute__ ((packed));
+} __packed;
/* The TCP Segmentation offload option descriptor
*
@@ -208,7 +208,7 @@ struct tcpopt_desc {
__le32 respAddrLo;
__le32 bytesTx;
__le32 status;
-} __attribute__ ((packed));
+} __packed;
/* The IPSEC Offload descriptor
*
@@ -227,7 +227,7 @@ struct ipsec_desc {
__le32 sa1;
__le32 sa2;
__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon receive descriptor (Updated by NIC)
*
@@ -284,7 +284,7 @@ struct rx_desc {
#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
__be32 vlanTag;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
*
@@ -301,7 +301,7 @@ struct rx_free {
__le32 physAddrHi;
u32 virtAddr;
u32 virtAddrHi;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon command descriptor, used for commands and responses
*
@@ -347,7 +347,7 @@ struct cmd_desc {
__le16 parm1;
__le32 parm2;
__le32 parm3;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon response descriptor, see command descriptor for details
*/
@@ -359,7 +359,7 @@ struct resp_desc {
__le16 parm1;
__le32 parm2;
__le32 parm3;
-} __attribute__ ((packed));
+} __packed;
#define INIT_COMMAND_NO_RESPONSE(x, command) \
do { struct cmd_desc *_ptr = (x); \
@@ -427,7 +427,7 @@ struct stats_resp {
#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
__le32 unused2;
__le32 unused3;
-} __attribute__ ((packed));
+} __packed;
/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
*/
@@ -488,7 +488,7 @@ struct sa_descriptor {
u32 index;
u32 unused;
u32 unused2;
-} __attribute__ ((packed));
+} __packed;
/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
* This is all for IPv4.
@@ -518,14 +518,14 @@ struct typhoon_file_header {
__le32 numSections;
__le32 startAddr;
__le32 hmacDigest[5];
-} __attribute__ ((packed));
+} __packed;
struct typhoon_section_header {
__le32 len;
u16 checksum;
u16 reserved;
__le32 startAddr;
-} __attribute__ ((packed));
+} __packed;
/* The Typhoon Register offsets
*/
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 807470e156af..dc32a62e611c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3704,6 +3704,19 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type)
return PHY_INTERFACE_MODE_MII;
}
+static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!ugeth->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(ugeth->phydev, if_mii(rq), cmd);
+}
+
static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_open = ucc_geth_open,
.ndo_stop = ucc_geth_close,
@@ -3713,6 +3726,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_set_multicast_list = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
+ .ndo_do_ioctl = ucc_geth_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ucc_netpoll,
#endif
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index ef1fbeb11c6e..05a95586f3c5 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -106,7 +106,7 @@ struct ucc_geth {
u32 scar; /* Statistics carry register */
u32 scam; /* Statistics carry mask register */
u8 res5[0x200 - 0x1c4];
-} __attribute__ ((packed));
+} __packed;
/* UCC GETH TEMODR Register */
#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
@@ -420,11 +420,11 @@ struct ucc_geth {
struct ucc_geth_thread_data_tx {
u8 res0[104];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_thread_data_rx {
u8 res0[40];
-} __attribute__ ((packed));
+} __packed;
/* Send Queue Queue-Descriptor */
struct ucc_geth_send_queue_qd {
@@ -432,19 +432,19 @@ struct ucc_geth_send_queue_qd {
u8 res0[0x8];
u32 last_bd_completed_address;/* initialize to last entry in BD ring */
u8 res1[0x30];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_send_queue_mem_region {
struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_thread_tx_pram {
u8 res0[64];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_thread_rx_pram {
u8 res0[128];
-} __attribute__ ((packed));
+} __packed;
#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
@@ -484,7 +484,7 @@ struct ucc_geth_scheduler {
/**< weight factor for queues */
u32 minw; /* temporary variable handled by QE */
u8 res1[0x70 - 0x64];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_tx_firmware_statistics_pram {
u32 sicoltx; /* single collision */
@@ -506,7 +506,7 @@ struct ucc_geth_tx_firmware_statistics_pram {
and 1518 octets */
u32 txpktsjumbo; /* total packets (including bad) between 1024
and MAXLength octets */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_firmware_statistics_pram {
u32 frrxfcser; /* frames with crc error */
@@ -540,7 +540,7 @@ struct ucc_geth_rx_firmware_statistics_pram {
replaced */
u32 insertvlan; /* total frames that had their VLAN tag
inserted */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_interrupt_coalescing_entry {
u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
@@ -548,23 +548,23 @@ struct ucc_geth_rx_interrupt_coalescing_entry {
u32 interruptcoalescingcounter; /* interrupt coalescing counter,
initialize to
interruptcoalescingmaxvalue */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_interrupt_coalescing_table {
struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
/**< interrupt coalescing entry */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_prefetched_bds {
struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_bd_queues_entry {
u32 bdbaseptr; /* BD base pointer */
u32 bdptr; /* BD pointer */
u32 externalbdbaseptr; /* external BD base pointer */
u32 externalbdptr; /* external BD pointer */
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_tx_global_pram {
u16 temoder;
@@ -580,13 +580,13 @@ struct ucc_geth_tx_global_pram {
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
u8 res2[0x80 - 0x74];
-} __attribute__ ((packed));
+} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
struct ucc_geth_exf_global_pram {
u32 l2pcdptr; /* individual address filter, high */
u8 res0[0x10 - 0x04];
-} __attribute__ ((packed));
+} __packed;
struct ucc_geth_rx_global_pram {
u32 remoder; /* ethernet mode reg. */
@@ -620,7 +620,7 @@ struct ucc_geth_rx_global_pram {
u32 exfGlobalParam; /* base address for extended filtering global
parameters */
u8 res6[0x100 - 0xC4]; /* Initialize to zero */
-} __attribute__ ((packed));
+} __packed;
#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
@@ -639,7 +639,7 @@ struct ucc_geth_init_pram {
u32 txglobal; /* tx global */
u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
u8 res3[0x1];
-} __attribute__ ((packed));
+} __packed;
#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
@@ -661,7 +661,7 @@ struct ucc_geth_82xx_enet_address {
u16 h; /* address (MSB) */
u16 m; /* address */
u16 l; /* address (LSB) */
-} __attribute__ ((packed));
+} __packed;
/* structure representing 82xx Address Filtering PRAM */
struct ucc_geth_82xx_address_filtering_pram {
@@ -672,7 +672,7 @@ struct ucc_geth_82xx_address_filtering_pram {
struct ucc_geth_82xx_enet_address __iomem taddr;
struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
u8 res0[0x40 - 0x38];
-} __attribute__ ((packed));
+} __packed;
/* GETH Tx firmware statistics structure, used when calling
UCC_GETH_GetStatistics. */
@@ -696,7 +696,7 @@ struct ucc_geth_tx_firmware_statistics {
and 1518 octets */
u32 txpktsjumbo; /* total packets (including bad) between 1024
and MAXLength octets */
-} __attribute__ ((packed));
+} __packed;
/* GETH Rx firmware statistics structure, used when calling
UCC_GETH_GetStatistics. */
@@ -732,7 +732,7 @@ struct ucc_geth_rx_firmware_statistics {
replaced */
u32 insertvlan; /* total frames that had their VLAN tag
inserted */
-} __attribute__ ((packed));
+} __packed;
/* GETH hardware statistics structure, used when calling
UCC_GETH_GetStatistics. */
@@ -781,7 +781,7 @@ struct ucc_geth_hardware_statistics {
u32 rbca; /* Total number of frames received successfully
that had destination address equal to the
broadcast address */
-} __attribute__ ((packed));
+} __packed;
/* UCC GETH Tx errors returned via TxConf callback */
#define TX_ERRORS_DEF 0x0200
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 9516f382a6ba..aea4645be7f6 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -179,7 +179,7 @@ struct ax88172_int_data {
__le16 res2;
u8 status;
__le16 res3;
-} __attribute__ ((packed));
+} __packed;
static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data)
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4dd23513c5af..39422f71e1d0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -211,7 +211,7 @@ struct hso_serial_state_notification {
u16 wIndex;
u16 wLength;
u16 UART_state_bitmap;
-} __attribute__((packed));
+} __packed;
struct hso_tiocmget {
struct mutex mutex;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d6078b8c4273..2b7b39cad1ce 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -207,7 +207,7 @@ struct kaweth_ethernet_configuration
__le16 segment_size;
__u16 max_multicast_filters;
__u8 reserved3;
-} __attribute__ ((packed));
+} __packed;
/****************************************************************
* kaweth_device
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 961a8ed38d8f..ba72a7281cb0 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -64,13 +64,13 @@ struct nc_header { // packed:
// all else is optional, and must start with:
// __le16 vendorId; // from usb-if
// __le16 productId;
-} __attribute__((__packed__));
+} __packed;
#define PAD_BYTE ((unsigned char)0xAC)
struct nc_trailer {
__le16 packet_id;
-} __attribute__((__packed__));
+} __packed;
// packets may use FLAG_FRAMING_NC and optional pad
#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index f1942d69a0d5..ee85c8b9a858 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -165,7 +165,7 @@ struct lsi_umts {
u8 gw_addr_len; /* NW-supplied GW address len */
u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
u8 reserved[8];
-} __attribute__ ((packed));
+} __packed;
#define SIERRA_NET_LSI_COMMON_LEN 4
#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a95c73de5824..44115eea57f9 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -643,7 +643,7 @@ int usbnet_stop (struct net_device *net)
netif_stop_queue (net);
netif_info(dev, ifdown, dev->net,
- "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+ "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index c38191179fae..f7b33ae7a703 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -193,7 +193,7 @@ struct rx_desc {
__le32 pa_low; /* Low 32 bit PCI address */
__le16 pa_high; /* Next 16 bit PCI address (48 total) */
__le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
-} __attribute__ ((__packed__));
+} __packed;
/*
* Transmit descriptor
@@ -208,7 +208,7 @@ struct tdesc1 {
__le16 vlan;
u8 TCR;
u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
-} __attribute__ ((__packed__));
+} __packed;
enum {
TD_QUEUE = cpu_to_le16(0x8000)
@@ -218,7 +218,7 @@ struct td_buf {
__le32 pa_low;
__le16 pa_high;
__le16 size; /* bits 0--13 - size, bit 15 - queue */
-} __attribute__ ((__packed__));
+} __packed;
struct tx_desc {
struct tdesc0 tdesc0;
@@ -1096,7 +1096,7 @@ struct mac_regs {
volatile __le16 PatternCRC[8]; /* 0xB0 */
volatile __le32 ByteMask[4][4]; /* 0xC0 */
-} __attribute__ ((__packed__));
+} __packed;
enum hw_mib {
@@ -1216,7 +1216,7 @@ struct arp_packet {
u8 ar_sip[4];
u8 ar_tha[ETH_ALEN];
u8 ar_tip[4];
-} __attribute__ ((__packed__));
+} __packed;
struct _magic_packet {
u8 dest_mac[6];
@@ -1224,7 +1224,7 @@ struct _magic_packet {
__be16 type;
u8 MAC[16][6];
u8 password[6];
-} __attribute__ ((__packed__));
+} __packed;
/*
* Store for chip context when saving and restoring status. Not
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b504bd561362..45c5dc225631 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4012,7 +4012,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
int high_dma = 0;
u64 vpath_mask = 0;
struct vxgedev *vdev;
- struct vxge_config ll_config;
+ struct vxge_config *ll_config = NULL;
struct vxge_hw_device_config *device_config = NULL;
struct vxge_hw_device_attr attr;
int i, j, no_of_vpath = 0, max_vpath_supported = 0;
@@ -4071,17 +4071,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- memset(&ll_config, 0, sizeof(struct vxge_config));
- ll_config.tx_steering_type = TX_MULTIQ_STEERING;
- ll_config.intr_type = MSI_X;
- ll_config.napi_weight = NEW_NAPI_WEIGHT;
- ll_config.rth_steering = RTH_STEERING;
+ ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+ if (!ll_config) {
+ ret = -ENOMEM;
+ vxge_debug_init(VXGE_ERR,
+ "ll_config : malloc failed %s %d",
+ __FILE__, __LINE__);
+ goto _exit0;
+ }
+ ll_config->tx_steering_type = TX_MULTIQ_STEERING;
+ ll_config->intr_type = MSI_X;
+ ll_config->napi_weight = NEW_NAPI_WEIGHT;
+ ll_config->rth_steering = RTH_STEERING;
/* get the default configuration parameters */
vxge_hw_device_config_default_get(device_config);
/* initialize configuration parameters */
- vxge_device_config_init(device_config, &ll_config.intr_type);
+ vxge_device_config_init(device_config, &ll_config->intr_type);
ret = pci_enable_device(pdev);
if (ret) {
@@ -4134,7 +4141,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
(unsigned long long)pci_resource_start(pdev, 0));
status = vxge_hw_device_hw_info_get(attr.bar0,
- &ll_config.device_hw_info);
+ &ll_config->device_hw_info);
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"%s: Reading of hardware info failed."
@@ -4143,7 +4150,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
- if (ll_config.device_hw_info.fw_version.major !=
+ if (ll_config->device_hw_info.fw_version.major !=
VXGE_DRIVER_FW_VERSION_MAJOR) {
vxge_debug_init(VXGE_ERR,
"%s: Incorrect firmware version."
@@ -4153,7 +4160,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
- vpath_mask = ll_config.device_hw_info.vpath_mask;
+ vpath_mask = ll_config->device_hw_info.vpath_mask;
if (vpath_mask == 0) {
vxge_debug_ll_config(VXGE_TRACE,
"%s: No vpaths available in device", VXGE_DRIVER_NAME);
@@ -4165,10 +4172,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s:%d Vpath mask = %llx", __func__, __LINE__,
(unsigned long long)vpath_mask);
- function_mode = ll_config.device_hw_info.function_mode;
- host_type = ll_config.device_hw_info.host_type;
+ function_mode = ll_config->device_hw_info.function_mode;
+ host_type = ll_config->device_hw_info.host_type;
is_privileged = __vxge_hw_device_is_privilaged(host_type,
- ll_config.device_hw_info.func_id);
+ ll_config->device_hw_info.func_id);
/* Check how many vpaths are available */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -4182,7 +4189,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
if (is_sriov(function_mode) && (max_config_dev > 1) &&
- (ll_config.intr_type != INTA) &&
+ (ll_config->intr_type != INTA) &&
(is_privileged == VXGE_HW_OK)) {
ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
? (max_config_dev - 1) : num_vfs);
@@ -4195,7 +4202,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
* Configure vpaths and get driver configured number of vpaths
* which is less than or equal to the maximum vpaths per function.
*/
- no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
+ no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
if (!no_of_vpath) {
vxge_debug_ll_config(VXGE_ERR,
"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
@@ -4230,21 +4237,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* set private device info */
pci_set_drvdata(pdev, hldev);
- ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
- ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
- ll_config.addr_learn_en = addr_learn_en;
- ll_config.rth_algorithm = RTH_ALG_JENKINS;
- ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
- ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
- ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
- if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
+ ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
+ ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
+ ll_config->addr_learn_en = addr_learn_en;
+ ll_config->rth_algorithm = RTH_ALG_JENKINS;
+ ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
+ ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
+ ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+ ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+
+ if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
&vdev)) {
ret = -EINVAL;
goto _exit4;
@@ -4275,7 +4282,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vdev->vpaths[j].vdev = vdev;
vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
memcpy((u8 *)vdev->vpaths[j].macaddr,
- (u8 *)ll_config.device_hw_info.mac_addrs[i],
+ ll_config->device_hw_info.mac_addrs[i],
ETH_ALEN);
/* Initialize the mac address list header */
@@ -4296,18 +4303,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
macaddr = (u8 *)vdev->vpaths[0].macaddr;
- ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
- ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+ ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+ ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+ ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
- vdev->ndev->name, ll_config.device_hw_info.serial_number);
+ vdev->ndev->name, ll_config->device_hw_info.serial_number);
vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
- vdev->ndev->name, ll_config.device_hw_info.part_number);
+ vdev->ndev->name, ll_config->device_hw_info.part_number);
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
- vdev->ndev->name, ll_config.device_hw_info.product_desc);
+ vdev->ndev->name, ll_config->device_hw_info.product_desc);
vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
vdev->ndev->name, macaddr);
@@ -4317,11 +4324,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE,
"%s: Firmware version : %s Date : %s", vdev->ndev->name,
- ll_config.device_hw_info.fw_version.version,
- ll_config.device_hw_info.fw_date.date);
+ ll_config->device_hw_info.fw_version.version,
+ ll_config->device_hw_info.fw_date.date);
if (new_device) {
- switch (ll_config.device_hw_info.function_mode) {
+ switch (ll_config->device_hw_info.function_mode) {
case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
vxge_debug_init(VXGE_TRACE,
"%s: Single Function Mode Enabled", vdev->ndev->name);
@@ -4344,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_print_parm(vdev, vpath_mask);
/* Store the fw version for ethtool option */
- strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
+ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4383,7 +4390,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
* present to prevent such a failure.
*/
- if (ll_config.device_hw_info.function_mode ==
+ if (ll_config->device_hw_info.function_mode ==
VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
if (vdev->config.intr_type == INTA)
vxge_hw_device_unmask_all(hldev);
@@ -4395,6 +4402,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
vxge_hw_device_trace_level_get(hldev));
+ kfree(ll_config);
return 0;
_exit5:
@@ -4412,6 +4420,7 @@ _exit2:
_exit1:
pci_disable_device(pdev);
_exit0:
+ kfree(ll_config);
kfree(device_config);
driver_config->config_dev_cnt--;
pci_set_drvdata(pdev, NULL);
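
Note: the vxge hunks above convert ll_config from an on-stack struct to a kzalloc()ed pointer, which is why every exit path now gains a kfree(ll_config). A minimal sketch of that pattern follows; struct big_cfg, its fields and example_hw_init() are placeholders for illustration, not vxge names.

        #include <linux/pci.h>
        #include <linux/slab.h>
        #include <linux/types.h>

        /* Placeholder for a configuration object too large to keep on the
         * probe routine's stack. Not a vxge structure. */
        struct big_cfg {
                u32 intr_type;
                u32 vpaths;
                u8  macaddr[6];
                /* ... many more fields ... */
        };

        static int example_hw_init(struct pci_dev *pdev, struct big_cfg *cfg)
        {
                cfg->intr_type = 0;     /* stand-in for real device setup */
                return 0;
        }

        static int example_probe(struct pci_dev *pdev)
        {
                struct big_cfg *cfg;
                int ret;

                cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); /* zeroed, may sleep */
                if (!cfg)
                        return -ENOMEM;

                ret = example_hw_init(pdev, cfg);        /* pass by pointer */
                if (ret)
                        goto err_free;

                kfree(cfg);             /* only needed while probing */
                return 0;

        err_free:
                kfree(cfg);             /* every exit path must free it */
                return ret;
        }

The allocation is transient and released before probe returns; the point of the conversion is simply to shrink the probe stack frame, just as the kfree(ll_config) calls in the hunks above do.
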
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e087b9a6daaa..43b77271532b 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2038,16 +2038,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Now copy the data to the card. */
- buf = kmalloc(wrthdr.size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf,
- ifr->ifr_data + sizeof (struct fstioc_write),
- wrthdr.size)) {
- kfree(buf);
- return -EFAULT;
- }
+ buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
+ wrthdr.size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
kfree(buf);
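
Note: the farsync hunk above (and the sdla.c hunk further down) folds an open-coded kmalloc()/copy_from_user() pair into memdup_user(), which returns either a kernel copy of the user buffer or an ERR_PTR() encoding -ENOMEM/-EFAULT. A small sketch of the two equivalent forms, with illustrative helper names:

        #include <linux/slab.h>
        #include <linux/string.h>
        #include <linux/uaccess.h>
        #include <linux/err.h>

        /* The open-coded pattern being replaced. */
        static int copy_blob_old(void __user *uptr, size_t len, void **out)
        {
                void *buf = kmalloc(len, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;
                if (copy_from_user(buf, uptr, len)) {
                        kfree(buf);
                        return -EFAULT;
                }
                *out = buf;
                return 0;
        }

        /* The equivalent using memdup_user(), as in the hunks above. */
        static int copy_blob_new(void __user *uptr, size_t len, void **out)
        {
                void *buf = memdup_user(uptr, len);

                if (IS_ERR(buf))
                        return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */
                *out = buf;
                return 0;
        }
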
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index 3839662ff201..e4f539ad071b 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -153,7 +153,7 @@ typedef struct {
u16 len; /* Data Length */
u8 stat; /* Status */
u8 unused; /* pads to 2-byte boundary */
-}__attribute__ ((packed)) pkt_desc;
+}__packed pkt_desc;
/* Packet Descriptor Status bits */
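
Note: the remaining WAN and wireless hunks in this series replace the open-coded __attribute__ ((packed)) with the kernel's __packed shorthand, pulled in via <linux/compiler.h>; the generated layout is identical. A hypothetical on-wire header as a sketch (not taken from any driver above):

        #include <linux/compiler.h>
        #include <linux/types.h>

        struct demo_hdr {
                u8     address;
                __be32 param;
                __be16 protocol;
                u8     control;
        } __packed;     /* expands to __attribute__((packed)): without it the
                         * compiler would pad after 'address' and round the
                         * size up to 12; packed, sizeof(struct demo_hdr) is
                         * exactly 8, matching the wire format */
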
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index ee7083fbea50..b38ffa149aba 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -36,7 +36,7 @@ struct hdlc_header {
u8 address;
u8 control;
__be16 protocol;
-}__attribute__ ((packed));
+}__packed;
struct cisco_packet {
@@ -45,7 +45,7 @@ struct cisco_packet {
__be32 par2;
__be16 rel; /* reliability */
__be32 time;
-}__attribute__ ((packed));
+}__packed;
#define CISCO_PACKET_LEN 18
#define CISCO_BIG_PACKET_LEN 20
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0e52993e2079..0edb535bb2b5 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -112,7 +112,7 @@ typedef struct {
unsigned de: 1;
unsigned ea2: 1;
#endif
-}__attribute__ ((packed)) fr_hdr;
+}__packed fr_hdr;
typedef struct pvc_device_struct {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 43ae6f440bfb..f4125da2762f 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -330,7 +330,7 @@ struct _dlci_stat
{
short dlci;
char flags;
-} __attribute__((packed));
+} __packed;
struct _frad_stat
{
@@ -1211,14 +1211,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
}
else
{
- temp = kmalloc(mem.len, GFP_KERNEL);
- if (!temp)
- return(-ENOMEM);
- if(copy_from_user(temp, mem.data, mem.len))
- {
- kfree(temp);
- return -EFAULT;
- }
+ temp = memdup_user(mem.data, mem.len);
+ if (IS_ERR(temp))
+ return PTR_ERR(temp);
sdla_write(dev, mem.addr, temp, mem.len);
kfree(temp);
}
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d86e8f31e7fc..2f725d0cc762 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -848,7 +848,7 @@ struct i2400m_cmd_enter_power_save {
struct i2400m_l3l4_hdr hdr;
struct i2400m_tlv_hdr tlv;
__le32 val;
-} __attribute__((packed));
+} __packed;
/*
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 11491354e5b5..8b55a5b14152 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -651,7 +651,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
struct {
struct i2400m_bootrom_header cmd;
u8 cmd_payload[chunk_len];
- } __attribute__((packed)) *buf;
+ } __packed *buf;
struct i2400m_bootrom_header ack;
d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
@@ -794,7 +794,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
struct {
struct i2400m_bootrom_header cmd;
u8 cmd_pl[0];
- } __attribute__((packed)) *cmd_buf;
+ } __packed *cmd_buf;
size_t signature_block_offset, signature_block_size;
d_fnstart(3, dev, "offset %zu\n", offset);
@@ -1029,7 +1029,7 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
struct {
struct i2400m_bootrom_header ack;
u8 ack_pl[16];
- } __attribute__((packed)) ack_buf;
+ } __packed ack_buf;
d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
cmd = i2400m->bm_cmd_buf;
@@ -1115,7 +1115,7 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
struct {
struct i2400m_bootrom_header cmd;
struct i2400m_bcf_hdr cmd_pl;
- } __attribute__((packed)) *cmd_buf;
+ } __packed *cmd_buf;
struct i2400m_bootrom_header ack;
d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 035e4cf3e6ed..9e02b90b0080 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -91,7 +91,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
struct {
struct i2400m_l3l4_hdr hdr;
struct i2400m_tlv_rf_operation sw_rf;
- } __attribute__((packed)) *cmd;
+ } __packed *cmd;
char strerr[32];
d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index b07e4d3a6b4d..bbc10b1cde87 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -80,7 +80,7 @@ struct adm8211_csr {
__le32 FEMR; /* 0x104 */
__le32 FPSR; /* 0x108 */
__le32 FFER; /* 0x10C */
-} __attribute__ ((packed));
+} __packed;
/* CSR0 - PAR (PCI Address Register) */
#define ADM8211_PAR_MWIE (1 << 24)
@@ -484,7 +484,7 @@ struct adm8211_tx_hdr {
u8 entry_control; // huh??
u16 reserved_1;
u32 reserved_2;
-} __attribute__ ((packed));
+} __packed;
#define RX_COPY_BREAK 128
@@ -531,7 +531,7 @@ struct adm8211_eeprom {
u8 lnags_threshold[14]; /* 0x70 */
__le16 checksum; /* 0x7E */
u8 cis_data[0]; /* 0x80, 384 bytes */
-} __attribute__ ((packed));
+} __packed;
struct adm8211_priv {
struct pci_dev *pdev;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3b7ab20a5c54..6b605df8a923 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -506,20 +506,20 @@ struct WepKeyRid {
u8 mac[ETH_ALEN];
__le16 klen;
u8 key[16];
-} __attribute__ ((packed));
+} __packed;
/* These structures are from the Aironet's PC4500 Developers Manual */
typedef struct Ssid Ssid;
struct Ssid {
__le16 len;
u8 ssid[32];
-} __attribute__ ((packed));
+} __packed;
typedef struct SsidRid SsidRid;
struct SsidRid {
__le16 len;
Ssid ssids[3];
-} __attribute__ ((packed));
+} __packed;
typedef struct ModulationRid ModulationRid;
struct ModulationRid {
@@ -528,7 +528,7 @@ struct ModulationRid {
#define MOD_DEFAULT cpu_to_le16(0)
#define MOD_CCK cpu_to_le16(1)
#define MOD_MOK cpu_to_le16(2)
-} __attribute__ ((packed));
+} __packed;
typedef struct ConfigRid ConfigRid;
struct ConfigRid {
@@ -652,7 +652,7 @@ struct ConfigRid {
#define MAGIC_STAY_IN_CAM (1<<10)
u8 magicControl;
__le16 autoWake;
-} __attribute__ ((packed));
+} __packed;
typedef struct StatusRid StatusRid;
struct StatusRid {
@@ -711,20 +711,20 @@ struct StatusRid {
#define STAT_LEAPFAILED 91
#define STAT_LEAPTIMEDOUT 92
#define STAT_LEAPCOMPLETE 93
-} __attribute__ ((packed));
+} __packed;
typedef struct StatsRid StatsRid;
struct StatsRid {
__le16 len;
__le16 spacer;
__le32 vals[100];
-} __attribute__ ((packed));
+} __packed;
typedef struct APListRid APListRid;
struct APListRid {
__le16 len;
u8 ap[4][ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
typedef struct CapabilityRid CapabilityRid;
struct CapabilityRid {
@@ -754,7 +754,7 @@ struct CapabilityRid {
__le16 bootBlockVer;
__le16 requiredHard;
__le16 extSoftCap;
-} __attribute__ ((packed));
+} __packed;
/* Only present on firmware >= 5.30.17 */
typedef struct BSSListRidExtra BSSListRidExtra;
@@ -762,7 +762,7 @@ struct BSSListRidExtra {
__le16 unknown[4];
u8 fixed[12]; /* WLAN management frame */
u8 iep[624];
-} __attribute__ ((packed));
+} __packed;
typedef struct BSSListRid BSSListRid;
struct BSSListRid {
@@ -796,7 +796,7 @@ struct BSSListRid {
/* Only present on firmware >= 5.30.17 */
BSSListRidExtra extra;
-} __attribute__ ((packed));
+} __packed;
typedef struct {
BSSListRid bss;
@@ -807,13 +807,13 @@ typedef struct tdsRssiEntry tdsRssiEntry;
struct tdsRssiEntry {
u8 rssipct;
u8 rssidBm;
-} __attribute__ ((packed));
+} __packed;
typedef struct tdsRssiRid tdsRssiRid;
struct tdsRssiRid {
u16 len;
tdsRssiEntry x[256];
-} __attribute__ ((packed));
+} __packed;
typedef struct MICRid MICRid;
struct MICRid {
@@ -823,7 +823,7 @@ struct MICRid {
u8 multicast[16];
__le16 unicastValid;
u8 unicast[16];
-} __attribute__ ((packed));
+} __packed;
typedef struct MICBuffer MICBuffer;
struct MICBuffer {
@@ -841,7 +841,7 @@ struct MICBuffer {
} u;
__be32 mic;
__be32 seq;
-} __attribute__ ((packed));
+} __packed;
typedef struct {
u8 da[ETH_ALEN];
@@ -996,7 +996,7 @@ struct rx_hdr {
u8 rate;
u8 freq;
__le16 tmp[4];
-} __attribute__ ((packed));
+} __packed;
typedef struct {
unsigned int ctl: 15;
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8a2d4afc74f8..429b281d40d1 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -305,7 +305,7 @@ struct dfu_status {
unsigned char poll_timeout[3];
unsigned char state;
unsigned char string;
-} __attribute__((packed));
+} __packed;
static inline int at76_is_intersil(enum board_type board)
{
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 1ec5ccffdbc0..972ea0fc1a0b 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -99,7 +99,7 @@ struct hwcfg_r505 {
u8 reserved2[14];
u8 cr15_values[14];
u8 reserved3[3];
-} __attribute__((packed));
+} __packed;
struct hwcfg_rfmd {
u8 cr20_values[14];
@@ -111,7 +111,7 @@ struct hwcfg_rfmd {
u8 low_power_values[14];
u8 normal_power_values[14];
u8 reserved1[3];
-} __attribute__((packed));
+} __packed;
struct hwcfg_intersil {
u8 mac_addr[ETH_ALEN];
@@ -120,7 +120,7 @@ struct hwcfg_intersil {
u8 pidvid[4];
u8 regulatory_domain;
u8 reserved[1];
-} __attribute__((packed));
+} __packed;
union at76_hwcfg {
struct hwcfg_intersil i;
@@ -149,14 +149,14 @@ struct at76_card_config {
u8 ssid_len;
u8 short_preamble;
__le16 beacon_period;
-} __attribute__((packed));
+} __packed;
struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
u8 data[0];
-} __attribute__((packed));
+} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet)
@@ -171,7 +171,7 @@ struct at76_rx_buffer {
u8 noise_level;
__le32 rx_time;
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
-} __attribute__((packed));
+} __packed;
/* Length of Atmel-specific Tx header before 802.11 frame */
#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet)
@@ -182,7 +182,7 @@ struct at76_tx_buffer {
u8 padding;
u8 reserved[4];
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
-} __attribute__((packed));
+} __packed;
/* defines for scan_type below */
#define SCAN_TYPE_ACTIVE 0
@@ -198,7 +198,7 @@ struct at76_req_scan {
__le16 max_channel_time;
u8 essid_size;
u8 international_scan;
-} __attribute__((packed));
+} __packed;
struct at76_req_ibss {
u8 bssid[ETH_ALEN];
@@ -207,7 +207,7 @@ struct at76_req_ibss {
u8 channel;
u8 essid_size;
u8 reserved[3];
-} __attribute__((packed));
+} __packed;
struct at76_req_join {
u8 bssid[ETH_ALEN];
@@ -217,7 +217,7 @@ struct at76_req_join {
__le16 timeout;
u8 essid_size;
u8 reserved;
-} __attribute__((packed));
+} __packed;
struct set_mib_buffer {
u8 type;
@@ -229,7 +229,7 @@ struct set_mib_buffer {
__le16 word;
u8 addr[ETH_ALEN];
} data;
-} __attribute__((packed));
+} __packed;
struct mib_local {
u16 reserved0;
@@ -241,14 +241,14 @@ struct mib_local {
u16 reserved2;
u8 preamble_type;
u16 reserved3;
-} __attribute__((packed));
+} __packed;
struct mib_mac_addr {
u8 mac_addr[ETH_ALEN];
u8 res[2]; /* ??? */
u8 group_addr[4][ETH_ALEN];
u8 group_addr_status[4];
-} __attribute__((packed));
+} __packed;
struct mib_mac {
__le32 max_tx_msdu_lifetime;
@@ -269,7 +269,7 @@ struct mib_mac {
u8 desired_bssid[ETH_ALEN];
u8 desired_bsstype; /* ad-hoc or infrastructure */
u8 reserved2;
-} __attribute__((packed));
+} __packed;
struct mib_mac_mgmt {
__le16 beacon_period;
@@ -292,7 +292,7 @@ struct mib_mac_mgmt {
u8 multi_domain_capability_enabled;
u8 country_string[3];
u8 reserved[3];
-} __attribute__((packed));
+} __packed;
struct mib_mac_wep {
u8 privacy_invoked; /* 0 disable encr., 1 enable encr */
@@ -303,7 +303,7 @@ struct mib_mac_wep {
__le32 wep_excluded_count;
u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
-} __attribute__((packed));
+} __packed;
struct mib_phy {
__le32 ed_threshold;
@@ -320,19 +320,19 @@ struct mib_phy {
u8 current_cca_mode;
u8 phy_type;
u8 current_reg_domain;
-} __attribute__((packed));
+} __packed;
struct mib_fw_version {
u8 major;
u8 minor;
u8 patch;
u8 build;
-} __attribute__((packed));
+} __packed;
struct mib_mdomain {
u8 tx_powerlevel[14];
u8 channel_list[14]; /* 0 for invalid channels */
-} __attribute__((packed));
+} __packed;
struct at76_fw_header {
__le32 crc; /* CRC32 of the whole image */
@@ -346,7 +346,7 @@ struct at76_fw_header {
__le32 int_fw_len; /* internal firmware image length */
__le32 ext_fw_offset; /* external firmware image offset */
__le32 ext_fw_len; /* external firmware image length */
-} __attribute__((packed));
+} __packed;
/* a description of a regulatory domain and the allowed channels */
struct reg_domain {
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index cc09595b781a..2242a140e4fe 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -13,5 +13,6 @@ ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
ath5k-y += ani.o
+ath5k-y += sysfs.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f2311ab35504..26dbe65fedb0 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -74,8 +74,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level >= ARRAY_SIZE(sz)) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
- "level out of range %d", level);
+ ATH5K_ERR(ah->ah_sc, "noise immunity level %d out of range",
+ level);
return;
}
@@ -106,8 +106,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
if (level < 0 || level >= ARRAY_SIZE(val) ||
level > ah->ah_sc->ani_state.max_spur_level) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
- "level out of range %d", level);
+ ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
+ level);
return;
}
@@ -130,8 +130,7 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
const int val[] = { 0, 4, 8 };
if (level < 0 || level >= ARRAY_SIZE(val)) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
- "level out of range %d", level);
+ ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
return;
}
@@ -481,14 +480,15 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
- if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
- return;
-
/* get listen time since last call and add it to the counter because we
- * might not have restarted the "ani period" last time */
+ * might not have restarted the "ani period" last time.
+ * always do this to calculate the busy time also in manual mode */
listen = ath5k_hw_ani_get_listen_time(ah, as);
as->listen_time += listen;
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
ath5k_ani_save_and_clear_phy_errors(ah, as);
ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
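
Note: the ani.c hunk above moves the manual-mode early return below the listen-time update, so busy-time statistics keep accumulating even when ANI is not in automatic mode. Schematically (simplified names, not the ath5k API):

        #include <linux/types.h>

        struct example_ani_state {
                int listen_time;
        };

        static void example_ani_tick(struct example_ani_state *as,
                                     int listen_delta, bool auto_mode)
        {
                as->listen_time += listen_delta;  /* always update the stats */

                if (!auto_mode)
                        return;                   /* manual mode: stats only */

                /* ...evaluate OFDM/CCK error thresholds and adjust levels... */
        }
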
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2785946f659a..ea6362a8988d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -204,6 +204,7 @@
#define AR5K_TUNE_TPC_TXPOWER false
#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -565,7 +566,7 @@ enum ath5k_pkt_type {
)
/*
- * DMA size definitions (2^n+2)
+ * DMA size definitions (2^(n+2))
*/
enum ath5k_dmasize {
AR5K_DMASIZE_4B = 0,
@@ -1118,6 +1119,7 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
unsigned long ah_cal_next_ani;
+ unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@@ -1125,15 +1127,10 @@ struct ath5k_hw {
/*
* Function pointers
*/
- int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int, unsigned int);
- int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int);
int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
struct ath5k_tx_status *);
int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1148,6 +1145,9 @@ struct ath5k_hw {
int ath5k_hw_attach(struct ath5k_softc *sc);
void ath5k_hw_detach(struct ath5k_hw *ah);
+int ath5k_sysfs_register(struct ath5k_softc *sc);
+void ath5k_sysfs_unregister(struct ath5k_softc *sc);
+
/* LED functions */
int ath5k_init_leds(struct ath5k_softc *sc);
void ath5k_led_enable(struct ath5k_softc *sc);
@@ -1231,6 +1231,11 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ u32 size, unsigned int flags);
+int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
+ u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
/* GPIO Functions */
void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
@@ -1270,6 +1275,7 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel);
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel);
@@ -1280,6 +1286,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
+void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
/* TX power setup */
int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 ee_mode, u8 txpower);
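
Note: the ath5k.h comment fix above corrects the DMA size formula to 2^(n+2): AR5K_DMASIZE_4B (0) means 4 bytes and each subsequent setting doubles it, up to 512 bytes. A one-line illustrative helper (not part of the driver) makes the mapping explicit:

        /* Illustrative only: DMA burst size in bytes for setting n,
         * i.e. 2^(n+2). n = 0 -> 4, n = 1 -> 8, ..., n = 7 -> 512. */
        static inline unsigned int example_dmasize_to_bytes(unsigned int n)
        {
                return 4U << n;
        }
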
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 31c008042bfe..b32e28caeee2 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -352,8 +352,6 @@ err_free:
*/
void ath5k_hw_detach(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
-
__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
if (ah->ah_rf_banks != NULL)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 648972df369d..20328bdd138b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize);
-static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
+
+static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
BUG_ON(!bf);
@@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
}
-static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
+static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
}
@@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc);
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
- struct ath5k_desc *ds,
struct sk_buff *skb,
struct ath5k_rx_status *rs);
static void ath5k_tasklet_rx(unsigned long data);
@@ -578,7 +582,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
spin_lock_init(&sc->block);
/* Set private data */
- pci_set_drvdata(pdev, hw);
+ pci_set_drvdata(pdev, sc);
/* Setup interrupt handler */
ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
@@ -694,25 +698,23 @@ err:
static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_softc *sc = pci_get_drvdata(pdev);
ath5k_debug_finish_device(sc);
- ath5k_detach(pdev, hw);
+ ath5k_detach(pdev, sc->hw);
ath5k_hw_detach(sc->ah);
kfree(sc->ah);
free_irq(pdev->irq, sc);
pci_iounmap(pdev, sc->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- ieee80211_free_hw(hw);
+ ieee80211_free_hw(sc->hw);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
ath5k_led_off(sc);
return 0;
@@ -721,8 +723,7 @@ static int ath5k_pci_suspend(struct device *dev)
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_softc *sc = pci_get_drvdata(pdev);
/*
* Suspend/Resume resets the PCI configuration space, so we have to
@@ -768,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
* return false w/o doing anything. MAC's that do
* support it will return true w/o doing anything.
*/
- ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+ ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
if (ret < 0)
goto err;
if (ret > 0)
@@ -864,6 +866,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
ath5k_init_leds(sc);
+ ath5k_sysfs_register(sc);
+
return 0;
err_queues:
ath5k_txq_release(sc);
@@ -899,6 +903,7 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
ath5k_unregister_leds(sc);
+ ath5k_sysfs_unregister(sc);
/*
* NB: can't reclaim these until after ieee80211_ifdetach
* returns because we'll get called back to reclaim node
@@ -1111,8 +1116,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
- sc->curchan->center_freq, chan->center_freq);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "channel set, resetting (%u -> %u MHz)\n",
+ sc->curchan->center_freq, chan->center_freq);
/*
* To switch channels clear any pending DMA operations;
@@ -1228,21 +1234,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* not get overrun under high load (as can happen with a
* 5212 when ANI processing enables PHY error frames).
*
- * To insure the last descriptor is self-linked we create
+ * To ensure the last descriptor is self-linked we create
* each descriptor as self-linked and add it to the end. As
* each additional descriptor is added the previous self-linked
- * entry is ``fixed'' naturally. This should be safe even
+ * entry is "fixed" naturally. This should be safe even
* if DMA is happening. When processing RX interrupts we
* never remove/process the last, self-linked, entry on the
- * descriptor list. This insures the hardware always has
+ * descriptor list. This ensures the hardware always has
* someplace to write a new frame.
*/
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
- ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
- if (ret)
+ ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret) {
+ ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
return ret;
+ }
if (sc->rxlink != NULL)
*sc->rxlink = bf->daddr;
@@ -1347,7 +1355,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
mrr_tries[i] = info->control.rates[i + 1].count;
}
- ah->ah_setup_mrr_tx_desc(ah, ds,
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
mrr_rate[0], mrr_tries[0],
mrr_rate[1], mrr_tries[1],
mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1451,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
{
struct ath5k_buf *bf;
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
list_for_each_entry(bf, &sc->txbuf, list)
- ath5k_txbuf_free(sc, bf);
+ ath5k_txbuf_free_skb(sc, bf);
list_for_each_entry(bf, &sc->rxbuf, list)
- ath5k_rxbuf_free(sc, bf);
+ ath5k_rxbuf_free_skb(sc, bf);
/* Free memory associated with all descriptors */
pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ sc->desc = NULL;
+ sc->desc_daddr = 0;
kfree(sc->bufptr);
sc->bufptr = NULL;
+ sc->bbuf = NULL;
}
@@ -1602,7 +1613,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ath5k_debug_printtxbuf(sc, bf);
- ath5k_txbuf_free(sc, bf);
+ ath5k_txbuf_free_skb(sc, bf);
spin_lock_bh(&sc->txbuflock);
list_move_tail(&bf->list, &sc->txbuf);
@@ -1721,8 +1732,8 @@ ath5k_rx_stop(struct ath5k_softc *sc)
}
static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
- struct sk_buff *skb, struct ath5k_rx_status *rs)
+ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1900,138 @@ static int ath5k_remove_padding(struct sk_buff *skb)
}
static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+ struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
+
+ /* The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - hdrlen % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ ath5k_remove_padding(skb);
+
+ rxs = IEEE80211_SKB_RXCB(skb);
+
+ rxs->flag = 0;
+ if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ /*
+ * always extend the mac timestamp, since this information is
+ * also needed for proper IBSS merging.
+ *
+ * XXX: it might be too late to do it here, since rs_tstamp is
+ * 15bit only. that means TSF extension has to be done within
+ * 32768usec (about 32ms). it might be necessary to move this to
+ * the interrupt handler, like it is done in madwifi.
+ *
+ * Unfortunately we don't know when the hardware takes the rx
+ * timestamp (beginning of phy frame, data frame, end of rx?).
+ * The only thing we know is that it is hardware specific...
+ * On AR5213 it seems the rx timestamp is at the end of the
+ * frame, but i'm not sure.
+ *
+ * NOTE: mac80211 defines mactime at the beginning of the first
+ * data symbol. Since we don't have any time references it's
+ * impossible to comply to that. This affects IBSS merge only
+ * right now, so it's not too bad...
+ */
+ rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+ rxs->flag |= RX_FLAG_TSFT;
+
+ rxs->freq = sc->curchan->center_freq;
+ rxs->band = sc->curband->band;
+
+ rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+
+ rxs->antenna = rs->rs_antenna;
+
+ if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+ sc->stats.antenna_rx[rs->rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
+ rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
+ rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+
+ if (rxs->rate_idx >= 0 && rs->rs_rate ==
+ sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+ rxs->flag |= RX_FLAG_SHORTPRE;
+
+ ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+
+ ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+
+ /* check beacons in IBSS mode */
+ if (sc->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(sc, skb, rxs);
+
+ ieee80211_rx(sc->hw, skb);
+}
+
+/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+{
+ sc->stats.rx_all_count++;
+
+ if (unlikely(rs->rs_status)) {
+ if (rs->rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs->rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs->rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+ return false;
+ }
+ if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+ /*
+ * Decrypt error. If the error occurred
+ * because there was no hardware key, then
+ * let the frame through so the upper layers
+ * can process it. This is necessary for 5210
+ * parts which have no way to setup a ``clear''
+ * key cache entry.
+ *
+ * XXX do key cache faulting
+ */
+ sc->stats.rxerr_decrypt++;
+ if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+ !(rs->rs_status & AR5K_RXERR_CRC))
+ return true;
+ }
+ if (rs->rs_status & AR5K_RXERR_MIC) {
+ sc->stats.rxerr_mic++;
+ return true;
+ }
+
+ /* let crypto-error packets fall through in MNTR */
+ if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
+ sc->opmode != NL80211_IFTYPE_MONITOR)
+ return false;
+ }
+
+ if (unlikely(rs->rs_more)) {
+ sc->stats.rxerr_jumbo++;
+ return false;
+ }
+ return true;
+}
+
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
@@ -1901,7 +2041,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int rx_flag;
spin_lock(&sc->rxbuflock);
if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2048,6 @@ ath5k_tasklet_rx(unsigned long data)
goto unlock;
}
do {
- rx_flag = 0;
-
bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
BUG_ON(bf->skb == NULL);
skb = bf->skb;
@@ -1926,137 +2063,30 @@ ath5k_tasklet_rx(unsigned long data)
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
sc->stats.rxerr_proc++;
- spin_unlock(&sc->rxbuflock);
- return;
+ break;
}
- sc->stats.rx_all_count++;
-
- if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_CRC)
- sc->stats.rxerr_crc++;
- if (rs.rs_status & AR5K_RXERR_FIFO)
- sc->stats.rxerr_fifo++;
- if (rs.rs_status & AR5K_RXERR_PHY) {
- sc->stats.rxerr_phy++;
- if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
- sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
- goto next;
- }
- if (rs.rs_status & AR5K_RXERR_DECRYPT) {
- /*
- * Decrypt error. If the error occurred
- * because there was no hardware key, then
- * let the frame through so the upper layers
- * can process it. This is necessary for 5210
- * parts which have no way to setup a ``clear''
- * key cache entry.
- *
- * XXX do key cache faulting
- */
- sc->stats.rxerr_decrypt++;
- if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
- !(rs.rs_status & AR5K_RXERR_CRC))
- goto accept;
- }
- if (rs.rs_status & AR5K_RXERR_MIC) {
- rx_flag |= RX_FLAG_MMIC_ERROR;
- sc->stats.rxerr_mic++;
- goto accept;
- }
+ if (ath5k_receive_frame_ok(sc, &rs)) {
+ next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
- /* let crypto-error packets fall through in MNTR */
- if ((rs.rs_status &
- ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
- sc->opmode != NL80211_IFTYPE_MONITOR)
+ /*
+ * If we can't replace bf->skb with a new skb under
+ * memory pressure, just skip this packet
+ */
+ if (!next_skb)
goto next;
- }
-
- if (unlikely(rs.rs_more)) {
- sc->stats.rxerr_jumbo++;
- goto next;
-
- }
-accept:
- next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
-
- /*
- * If we can't replace bf->skb with a new skb under memory
- * pressure, just skip this packet
- */
- if (!next_skb)
- goto next;
-
- pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, rs.rs_datalen);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - hdrlen % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- ath5k_remove_padding(skb);
-
- rxs = IEEE80211_SKB_RXCB(skb);
-
- /*
- * always extend the mac timestamp, since this information is
- * also needed for proper IBSS merging.
- *
- * XXX: it might be too late to do it here, since rs_tstamp is
- * 15bit only. that means TSF extension has to be done within
- * 32768usec (about 32ms). it might be necessary to move this to
- * the interrupt handler, like it is done in madwifi.
- *
- * Unfortunately we don't know when the hardware takes the rx
- * timestamp (beginning of phy frame, data frame, end of rx?).
- * The only thing we know is that it is hardware specific...
- * On AR5213 it seems the rx timestamp is at the end of the
- * frame, but i'm not sure.
- *
- * NOTE: mac80211 defines mactime at the beginning of the first
- * data symbol. Since we don't have any time references it's
- * impossible to comply to that. This affects IBSS merge only
- * right now, so it's not too bad...
- */
- rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
- rxs->flag = rx_flag | RX_FLAG_TSFT;
-
- rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curband->band;
-
- rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
-
- rxs->antenna = rs.rs_antenna;
-
- if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
- sc->stats.antenna_rx[rs.rs_antenna]++;
- else
- sc->stats.antenna_rx[0]++; /* invalid */
-
- rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
- rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
-
- if (rxs->rate_idx >= 0 && rs.rs_rate ==
- sc->curband->bitrates[rxs->rate_idx].hw_value_short)
- rxs->flag |= RX_FLAG_SHORTPRE;
- ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ pci_unmap_single(sc->pdev, bf->skbaddr,
+ common->rx_bufsize,
+ PCI_DMA_FROMDEVICE);
- ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
+ skb_put(skb, rs.rs_datalen);
- /* check beacons in IBSS mode */
- if (sc->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_check_ibss_tsf(sc, skb, rxs);
+ ath5k_receive_frame(sc, skb, &rs);
- ieee80211_rx(sc->hw, skb);
-
- bf->skb = next_skb;
- bf->skbaddr = next_skb_addr;
+ bf->skb = next_skb;
+ bf->skbaddr = next_skb_addr;
+ }
next:
list_move_tail(&bf->list, &sc->rxbuf);
} while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2065,8 +2095,6 @@ unlock:
}
-
-
/*************\
* TX Handling *
\*************/
@@ -2298,6 +2326,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
"stuck beacon time (%u missed)\n",
sc->bmisscount);
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "stuck beacon, resetting\n");
tasklet_schedule(&sc->restq);
}
return;
@@ -2647,7 +2677,7 @@ ath5k_stop_hw(struct ath5k_softc *sc)
ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
"putting device to sleep\n");
}
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
mmiowb();
mutex_unlock(&sc->lock);
@@ -2705,6 +2735,8 @@ ath5k_intr(int irq, void *dev_id)
* Fatal errors are unrecoverable.
* Typically these are caused by DMA errors.
*/
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "fatal int, resetting\n");
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
/*
@@ -2717,8 +2749,11 @@ ath5k_intr(int irq, void *dev_id)
* this guess is copied from the HAL.
*/
sc->stats.rxorn_intr++;
- if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ "rx overrun, resetting\n");
tasklet_schedule(&sc->restq);
+ }
else
tasklet_schedule(&sc->rxtq);
} else {
@@ -2785,10 +2820,6 @@ ath5k_tasklet_calibrate(unsigned long data)
/* Only full calibration for now */
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
- /* Stop queues so that calibration
- * doesn't interfere with tx */
- ieee80211_stop_queues(sc->hw);
-
ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(sc->curchan->center_freq),
sc->curchan->hw_value);
@@ -2806,8 +2837,16 @@ ath5k_tasklet_calibrate(unsigned long data)
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
- /* Wake queues */
- ieee80211_wake_queues(sc->hw);
+ /* Noise floor calibration interrupts rx/tx path while I/Q calibration
+ * doesn't. We stop the queues so that calibration doesn't interfere
+ * with TX and don't run it as often */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
+ ah->ah_cal_next_nf = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
+ ieee80211_stop_queues(sc->hw);
+ ath5k_hw_update_noise_floor(ah);
+ ieee80211_wake_queues(sc->hw);
+ }
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}
@@ -2926,6 +2965,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+ ah->ah_cal_next_full = jiffies;
+ ah->ah_cal_next_ani = jiffies;
+ ah->ah_cal_next_nf = jiffies;
+
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@@ -3360,7 +3403,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ath5k_debug_dump_skb(sc, skb, "BC ", 1);
- ath5k_txbuf_free(sc, sc->bbuf);
+ ath5k_txbuf_free_skb(sc, sc->bbuf);
sc->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, sc->bbuf);
if (ret)
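
Note: among the base.c changes above, the calibration tasklet now stops the queues only for the noise-floor step and rate-limits that step with a jiffies deadline (ah_cal_next_nf and ATH5K_TUNE_CALIBRATION_INTERVAL_NF). The generic pattern, as a sketch with illustrative names:

        #include <linux/jiffies.h>

        #define EXAMPLE_INTERVAL_MS 60000       /* 60 s, like the NF interval */

        /* Run an expensive calibration step at most once per interval,
         * tracked with a "next due" timestamp in jiffies. */
        static void example_periodic_calib(unsigned long *next_due)
        {
                if (!time_is_before_eq_jiffies(*next_due))
                        return;                 /* not due yet */

                *next_due = jiffies + msecs_to_jiffies(EXAMPLE_INTERVAL_MS);

                /* ...stop queues, run the disruptive step, wake queues... */
        }
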
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 74f007126f41..beae519aa735 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -34,7 +34,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
u16 ee_header;
- ATH5K_TRACE(ah->ah_sc);
/* Capabilities stored in the EEPROM */
ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
@@ -123,8 +122,6 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah,
enum ath5k_capability_type cap_type,
u32 capability, u32 *result)
{
- ATH5K_TRACE(ah->ah_sc);
-
switch (cap_type) {
case AR5K_CAP_NUM_TXQUEUES:
if (result) {
@@ -173,8 +170,6 @@ yes:
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
u16 assoc_id)
{
- ATH5K_TRACE(ah->ah_sc);
-
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
@@ -186,8 +181,6 @@ int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
-
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 6fb5c5ffa5b1..8c638865c712 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -278,6 +278,7 @@ static ssize_t write_file_reset(struct file *file,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
+ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
tasklet_schedule(&sc->restq);
return count;
}
@@ -307,7 +308,6 @@ static const struct {
{ ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
- { ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@@ -426,6 +426,13 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
+
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -729,6 +736,66 @@ static const struct file_operations fops_ani = {
};
+/* debugfs: queues etc */
+
+static ssize_t read_file_queue(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+
+ struct ath5k_txq *txq;
+ struct ath5k_buf *bf, *bf0;
+ int i, n = 0;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "available txbuffers: %d\n", sc->txbuf_len);
+
+ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
+ txq = &sc->txqs[i];
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "%02d: %ssetup\n", i, txq->setup ? "" : "not ");
+
+ if (!txq->setup)
+ continue;
+
+ list_for_each_entry_safe(bf, bf0, &txq->q, list)
+ n++;
+ len += snprintf(buf+len, sizeof(buf)-len, " len: %d\n", n);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_queue(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "start", 5) == 0)
+ ieee80211_wake_queues(sc->hw);
+ else if (strncmp(buf, "stop", 4) == 0)
+ ieee80211_stop_queues(sc->hw);
+
+ return count;
+}
+
+
+static const struct file_operations fops_queue = {
+ .read = read_file_queue,
+ .write = write_file_queue,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
/* init */
void
@@ -772,6 +839,11 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
S_IWUSR | S_IRUSR,
sc->debug.debugfs_phydir, sc,
&fops_ani);
+
+ sc->debug.debugfs_queue = debugfs_create_file("queue",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_queue);
}
void
@@ -790,6 +862,7 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_antenna);
debugfs_remove(sc->debug.debugfs_frameerrors);
debugfs_remove(sc->debug.debugfs_ani);
+ debugfs_remove(sc->debug.debugfs_queue);
debugfs_remove(sc->debug.debugfs_phydir);
}
@@ -852,7 +925,7 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
ds, (unsigned long long)bf->daddr,
ds->ds_link, ds->ds_data,
rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
- rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_0,
+ rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
}
@@ -867,7 +940,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
return;
- printk(KERN_DEBUG "rx queue %x, link %p\n",
+ printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
ath5k_hw_get_rxdp(ah), sc->rxlink);
spin_lock_bh(&sc->rxbuflock);
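
Note: the new debugfs "queue" entry added above accepts the strings "start" and "stop" to wake or stop the mac80211 queues, which is handy when poking at stuck-beacon and reset problems. A small user-space sketch; the path below is an assumption (debugfs mounted at /sys/kernel/debug, device named phy0), so adjust as needed:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                const char *path = "/sys/kernel/debug/ath5k/phy0/queue";
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (write(fd, "stop", 4) != 4)  /* or "start" to wake queues */
                        perror("write");
                close(fd);
                return 0;
        }
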
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index ddd5b3a99e8d..606ae94a9157 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -77,6 +77,7 @@ struct ath5k_dbg_info {
struct dentry *debugfs_antenna;
struct dentry *debugfs_frameerrors;
struct dentry *debugfs_ani;
+ struct dentry *debugfs_queue;
};
/**
@@ -115,18 +116,12 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_RX = 0x00000100,
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
- ATH5K_DEBUG_TRACE = 0x00001000,
ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};
#ifdef CONFIG_ATH5K_DEBUG
-#define ATH5K_TRACE(_sc) do { \
- if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \
- printk(KERN_DEBUG "ath5k trace %s:%d\n", __func__, __LINE__); \
- } while (0)
-
#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \
if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \
ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
@@ -168,8 +163,6 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
#include <linux/compiler.h>
-#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc))
-
static inline void __attribute__ ((format (printf, 3, 4)))
ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7d7b646ab65a..43244382f213 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -91,14 +91,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
- * Verify and set header length
- * XXX: I only found that on 5210 code, does it work on 5211 ?
+ * Verify and set header length (only 5210)
*/
if (ah->ah_version == AR5K_AR5210) {
- if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
+ if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
return -EINVAL;
tx_ctl->tx_control_0 |=
- AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
+ AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
}
/*Differences between 5210-5211*/
@@ -110,11 +109,11 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
default:
- frame_type = type /*<< 2 ?*/;
+ frame_type = type;
}
tx_ctl->tx_control_0 |=
- AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
+ AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
} else {
@@ -123,21 +122,30 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(antenna_mode,
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |=
- AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
+ AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
}
+
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
}
-
+#define _TX_FLAGS_5211(_c, _flag) \
+ if (flags & AR5K_TXDESC_##_flag) { \
+ tx_ctl->tx_control_##_c |= \
+ AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
+ }
_TX_FLAGS(0, CLRDMASK);
- _TX_FLAGS(0, VEOL);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
- _TX_FLAGS(1, NOACK);
+
+ if (ah->ah_version == AR5K_AR5211) {
+ _TX_FLAGS_5211(0, VEOL);
+ _TX_FLAGS_5211(1, NOACK);
+ }
#undef _TX_FLAGS
+#undef _TX_FLAGS_5211
/*
* WEP crap
@@ -147,7 +155,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |=
AR5K_REG_SM(key_index,
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
}
/*
@@ -156,7 +164,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
if ((ah->ah_version == AR5K_AR5210) &&
(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
tx_ctl->tx_control_1 |= rtscts_duration &
- AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
+ AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
return 0;
}
@@ -176,7 +184,6 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
- ATH5K_TRACE(ah->ah_sc);
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
/*
@@ -256,7 +263,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
if (key_index != AR5K_TXKEYIX_INVALID) {
tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
- AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
+ AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
}
/*
@@ -278,13 +285,17 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/*
* Initialize a 4-word multi rate retry tx control descriptor on 5212
*/
-static int
+int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
+ /* no mrr support for cards older than 5212 */
+ if (ah->ah_version < AR5K_AR5212)
+ return 0;
+
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
@@ -324,15 +335,6 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/* no mrr support for cards older than 5212 */
-static int
-ath5k_hw_setup_no_mrr(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
-{
- return 0;
-}
-
/*
 * Process the tx status descriptor on 5210/5211
*/
@@ -342,8 +344,6 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
- ATH5K_TRACE(ah->ah_sc);
-
tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
tx_status = &desc->ud.ds_tx5210.tx_stat;
@@ -396,8 +396,6 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
- ATH5K_TRACE(ah->ah_sc);
-
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
tx_status = &desc->ud.ds_tx5212.tx_stat;
@@ -419,11 +417,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = (tx_status->tx_status_1 &
- AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
+ AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
ts->ts_status = 0;
ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1,
- AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX);
+ AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
/* The longretry counter has the number of un-acked retries
* for the final rate. To get the total number of retries
@@ -485,12 +483,11 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
/*
* Initialize an rx control descriptor
*/
-static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
- ATH5K_TRACE(ah->ah_sc);
rx_ctl = &desc->ud.ds_rx.rx_ctl;
/*
@@ -502,10 +499,11 @@ static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
+ if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
+ return -EINVAL;
+
/* Setup descriptor */
rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
- if (unlikely(rx_ctl->rx_control_1 != size))
- return -EINVAL;
if (flags & AR5K_RXDESC_INTREQ)
rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
@@ -521,13 +519,15 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
{
struct ath5k_hw_rx_status *rx_status;
- rx_status = &desc->ud.ds_rx.u.rx_stat;
+ rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_DONE)))
+ AR5K_5210_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+
/*
* Frame receive status
*/
@@ -537,15 +537,23 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
- rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
- AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
rs->rs_more = !!(rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_MORE);
- /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
+ /* TODO: this timestamp is 13 bit, later on we assume 15 bit!
+ * also the HAL code for 5210 says the timestamp is bits [10..22] of the
+ * TSF, and extends the timestamp here to 15 bit.
+ * we need to check on 5210...
+ */
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
- rs->rs_status = 0;
- rs->rs_phyerr = 0;
+
+ if (ah->ah_version == AR5K_AR5211)
+ rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
+ else
+ rs->rs_antenna = (rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
+ ? 2 : 1;
/*
* Key table status
@@ -560,19 +568,21 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
- if (rx_status->rx_status_1 &
- AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
+ /* only on 5210 */
+ if ((ah->ah_version == AR5K_AR5210) &&
+ (rx_status->rx_status_1 &
+ AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
rs->rs_status |= AR5K_RXERR_FIFO;
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
- rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
+ rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
}
@@ -588,22 +598,20 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
 * Process the rx status descriptor on 5212
*/
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
- struct ath5k_hw_rx_error *rx_err;
- ATH5K_TRACE(ah->ah_sc);
- rx_status = &desc->ud.ds_rx.u.rx_stat;
-
- /* Overlay on error */
- rx_err = &desc->ud.ds_rx.u.rx_err;
+ rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
- AR5K_5212_RX_DESC_STATUS1_DONE)))
+ AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
+ memset(rs, 0, sizeof(struct ath5k_rx_status));
+
/*
* Frame receive status
*/
@@ -619,15 +627,13 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
- rs->rs_status = 0;
- rs->rs_phyerr = 0;
/*
* Key table status
*/
if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
- AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
+ AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
@@ -635,7 +641,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
- AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
+ AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
@@ -643,9 +649,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
if (rx_status->rx_status_1 &
AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
- rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
- AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
- ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
+ rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
+ AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -656,7 +663,6 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
rs->rs_status |= AR5K_RXERR_MIC;
}
-
return 0;
}
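
With the error descriptor gone, the 5212 PHY error code is read with AR5K_REG_MS straight from rx_status_1. Below is a small self-contained sketch of the usual mask-plus-shift idiom such a macro relies on; REG_MS is a local stand-in under that assumption, not the driver's own definition, and the mask/shift values are copied from the new defines in desc.h:

#include <stdio.h>

#define STATUS1_PHY_ERROR_CODE   0x0000ff00	/* mirrors AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE */
#define STATUS1_PHY_ERROR_CODE_S 8		/* matching shift */

/* extract a field: mask it out, then shift it down using the _S companion */
#define REG_MS(_val, _field) (((_val) & (_field)) >> _field##_S)

int main(void)
{
	unsigned int rx_status_1 = 0x00003a01;	/* DONE bit set, PHY error code 0x3a */

	printf("phyerr = 0x%02x\n",
	       (unsigned int)REG_MS(rx_status_1, STATUS1_PHY_ERROR_CODE));
	return 0;				/* prints: phyerr = 0x3a */
}
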
@@ -665,29 +671,15 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
*/
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
-
- if (ah->ah_version != AR5K_AR5210 &&
- ah->ah_version != AR5K_AR5211 &&
- ah->ah_version != AR5K_AR5212)
- return -ENOTSUPP;
-
if (ah->ah_version == AR5K_AR5212) {
- ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
- ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
- } else {
- ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
+ ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
+ } else if (ah->ah_version <= AR5K_AR5211) {
ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
- ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_no_mrr;
ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
- }
-
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
- else if (ah->ah_version <= AR5K_AR5211)
ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
-
+ } else
+ return -ENOTSUPP;
return 0;
}
-
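
The simplified ath5k_hw_init_desc_functions() just selects per-generation callbacks that the rest of the driver invokes through the ath5k_hw struct. A hedged sketch of that dispatch pattern with simplified stand-in types (nothing below matches the driver's real layout):

#include <stdio.h>

struct desc;					/* opaque descriptor, used only by pointer */

struct hw {
	int version;				/* 0 = 5210/5211 class, 1 = 5212 class */
	int (*proc_rx_desc)(struct hw *hw, struct desc *d);
};

static int proc_rx_old(struct hw *hw, struct desc *d)  { return 0; }
static int proc_rx_5212(struct hw *hw, struct desc *d) { return 1; }

static int init_desc_functions(struct hw *hw)
{
	if (hw->version == 1)
		hw->proc_rx_desc = proc_rx_5212;
	else if (hw->version == 0)
		hw->proc_rx_desc = proc_rx_old;
	else
		return -1;			/* unsupported MAC, like -ENOTSUPP above */
	return 0;
}

int main(void)
{
	struct hw hw = { .version = 1 };

	if (!init_desc_functions(&hw))
		printf("rx handler returned %d\n", hw.proc_rx_desc(&hw, NULL));
	return 0;
}
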
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 64538fbe4167..b2adb2a281c2 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -17,28 +17,24 @@
*/
/*
- * Internal RX/TX descriptor structures
- * (rX: reserved fields possibily used by future versions of the ar5k chipset)
+ * RX/TX descriptor structures
*/
/*
- * common hardware RX control descriptor
+ * Common hardware RX control descriptor
*/
struct ath5k_hw_rx_ctl {
u32 rx_control_0; /* RX control word 0 */
u32 rx_control_1; /* RX control word 1 */
} __packed;
-/* RX control word 0 field/sflags */
-#define AR5K_DESC_RX_CTL0 0x00000000
-
/* RX control word 1 fields/flags */
-#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff
-#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000
+#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
/*
- * common hardware RX status descriptor
- * 5210/11 and 5212 differ only in the flags defined below
+ * Common hardware RX status descriptor
+ * 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
u32 rx_status_0; /* RX status word 0 */
@@ -47,81 +43,69 @@ struct ath5k_hw_rx_status {
/* 5210/5211 */
/* RX status word 0 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff
-#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
+#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
-#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */
+#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27
/* RX status word 1 fields/flags */
-#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001
-#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
-#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004
-#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008
-#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010
-#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0
+#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
+#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decryption CRC failure */
+#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decryption key index */
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
-#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000
+#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bits of the TSF */
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
-#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
+#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */
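
The width mismatch flagged by the TODO in the 5210 RX path is visible directly in these masks: the 5210/5211 timestamp field is 13 bits wide, while the 5212 field further down is 15 bits. A quick stand-alone check of that arithmetic (mask values copied from the defines; __builtin_popcount is a GCC/Clang builtin that counts set bits):

#include <stdio.h>

int main(void)
{
	unsigned int ts_5210 = 0x0fff8000;	/* 5210/11 RECEIVE_TIMESTAMP mask, shift 15 */
	unsigned int ts_5212 = 0x7fff0000;	/* 5212 RECEIVE_TIMESTAMP mask, shift 16 */

	printf("5210/11 timestamp width: %d bits\n", __builtin_popcount(ts_5210)); /* 13 */
	printf("5212 timestamp width: %d bits\n", __builtin_popcount(ts_5212));    /* 15 */
	return 0;
}
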
/* 5212 */
/* RX status word 0 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff
-#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000
-#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000
+#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
+#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
-#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
+#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
/* RX status word 1 fields/flags */
-#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001
-#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
-#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004
-#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008
-#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010
-#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
-#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00
+#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */
+#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
+#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */
+#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
+#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */
#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
-#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000
+#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15 bits of the TSF */
#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
-#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000
-
-/*
- * common hardware RX error descriptor
- */
-struct ath5k_hw_rx_error {
- u32 rx_error_0; /* RX status word 0 */
- u32 rx_error_1; /* RX status word 1 */
-} __packed;
-
-/* RX error word 0 fields/flags */
-#define AR5K_RX_DESC_ERROR0 0x00000000
-
-/* RX error word 1 fields/flags */
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
-#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
+#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */
+#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8
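
The comment on the new PHY_ERROR_CODE mask also explains why the separate rx_error overlay could be dropped: on an errored frame the 8-bit code simply reuses the bit positions of the key-index-valid flag and the key index. The overlap is easy to verify with the mask values above:

#include <stdio.h>

int main(void)
{
	unsigned int key_valid = 0x00000100;	/* KEY_INDEX_VALID */
	unsigned int key_index = 0x0000fe00;	/* KEY_INDEX */
	unsigned int phy_code  = 0x0000ff00;	/* PHY_ERROR_CODE */

	/* the error code field is exactly the union of the two key fields */
	printf("%s\n", (key_valid | key_index) == phy_code ? "overlays" : "does not overlay");
	return 0;				/* prints: overlays */
}
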
/**
* enum ath5k_phy_error_code - PHY Error codes
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
/* these are specific to the 5212 */
@@ -148,112 +132,111 @@ struct ath5k_hw_2w_tx_ctl {
} __packed;
/* TX control word 0 fields/flags */
-#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/
-#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12
-#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */
+#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12
+#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */
#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18
-#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000
-#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000
-#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET 0x00800000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_VEOL 0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE 0x1c000000 /*[5210]*/
-#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000
-#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000
-
+#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */
+#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */
+#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */
+#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
(ah->ah_version == AR5K_AR5210 ? \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
-
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
-#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000
-#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */
+#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26
+#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */
/* TX control word 1 fields/flags */
-#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff
-#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \
+#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \
(ah->ah_version == AR5K_AR5210 ? \
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \
- AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
-
-#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20
-#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/
-#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \
+ AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
+#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */
+#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20
+#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */
+#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */
/* Frame types */
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 0x0c
-#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3
+#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
+#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
/*
* 5212 hardware 4-word TX control descriptor
*/
struct ath5k_hw_4w_tx_ctl {
u32 tx_control_0; /* TX control word 0 */
+ u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_2; /* TX control word 2 */
+ u32 tx_control_3; /* TX control word 3 */
+} __packed;
-#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
-#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000
+/* TX control word 0 fields/flags */
+#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
+#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */
#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16
-#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000
-#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000
-#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000
-#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000
+#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
+#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */
+#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
+#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */
#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
-#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000
-#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
-#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000
-
- u32 tx_control_1; /* TX control word 1 */
+#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
+#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */
+#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */
-#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff
-#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX 0x000fe000
-#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
-#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000
+/* TX control word 1 fields/flags */
+#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
+#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */
+#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13
+#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */
#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20
-#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000
-#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000
+#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */
+#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */
#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25
-#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */
#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27
-#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000
+#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */
#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29
- u32 tx_control_2; /* TX control word 2 */
-
-#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff
-#define AR5K_4W_TX_DESC_CTL2_DURATION_UPDATE_ENABLE 0x00008000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000
-#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
-
- u32 tx_control_3; /* TX control word 3 */
-
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0
+/* TX control word 2 fields/flags */
+#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */
+#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */
+#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
+
+/* TX control word 3 fields/flags */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10
-#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000
+#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15
-#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000
+#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-} __packed;
/*
* Common TX status descriptor
@@ -264,37 +247,34 @@ struct ath5k_hw_tx_status {
} __packed;
/* TX status word 0 fields/flags */
-#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001
-#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002
-#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004
-#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008
-/*???
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT 0x000000f0
-#define AR5K_DESC_TX_STATUS0_RTS_FAIL_COUNT_S 4
-*/
-#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0
+#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
+#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */
+#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */
+#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */
+/* according to the HAL sources the spec has short/long retry counts reversed.
+ * we keep them reversed here as well, matching the HAL sources, for 5210 and 5211.
+ * For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
+ * but used respectively as SHORT and LONG retry count in the code later. This
+ * is consistent with the definitions here... TODO: check */
+#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */
#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4
-/*???
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT 0x00000f00
-#define AR5K_DESC_TX_STATUS0_DATA_FAIL_COUNT_S 8
-*/
-#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00
+#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */
#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT 0x0000f000
-#define AR5K_DESC_TX_STATUS0_VIRT_COLL_COUNT_S 12
-#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */
+#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211_S 12
+#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */
#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
/* TX status word 1 fields/flags */
-#define AR5K_DESC_TX_STATUS1_DONE 0x00000001
-#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe
+#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */
+#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */
#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
-#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000
+#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */
#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX 0x00600000
-#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21
-#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000
-#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */
+#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21
+#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
+#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
/*
* 5210/5211 hardware TX descriptor
@@ -313,18 +293,15 @@ struct ath5k_hw_5212_tx_desc {
} __packed;
/*
- * common hardware RX descriptor
+ * Common hardware RX descriptor
*/
struct ath5k_hw_all_rx_desc {
- struct ath5k_hw_rx_ctl rx_ctl;
- union {
- struct ath5k_hw_rx_status rx_stat;
- struct ath5k_hw_rx_error rx_err;
- } u;
+ struct ath5k_hw_rx_ctl rx_ctl;
+ struct ath5k_hw_rx_status rx_stat;
} __packed;
/*
- * Atheros hardware descriptor
+ * Atheros hardware DMA descriptor
* This is read and written to by the hardware
*/
struct ath5k_desc {
@@ -346,4 +323,3 @@ struct ath5k_desc {
#define AR5K_TXDESC_CTSENA 0x0008
#define AR5K_TXDESC_INTREQ 0x0010
#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
-
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 941b51130a6f..484f31870ba8 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -48,7 +48,6 @@
*/
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
@@ -62,7 +61,6 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
- ATH5K_TRACE(ah->ah_sc);
ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
/*
@@ -96,8 +94,6 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
*/
void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
- ATH5K_TRACE(ah->ah_sc);
-
ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}
@@ -125,7 +121,6 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
@@ -186,7 +181,6 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
unsigned int i = 40;
u32 tx_queue, pending;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
@@ -297,7 +291,6 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
@@ -340,7 +333,6 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
@@ -400,8 +392,6 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
u32 trigger_level, imr;
int ret = -EIO;
- ATH5K_TRACE(ah->ah_sc);
-
/*
* Disable interrupts by setting the mask
*/
@@ -451,7 +441,6 @@ done:
*/
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
@@ -475,8 +464,6 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
u32 data;
- ATH5K_TRACE(ah->ah_sc);
-
/*
* Read interrupt status from the Interrupt Status register
* on 5210
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index ed0263672d6d..ae316fec4a6a 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -35,7 +35,6 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
{
u32 status, timeout;
- ATH5K_TRACE(ah->ah_sc);
/*
* Initialize EEPROM access
*/
@@ -715,7 +714,7 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
/* Only one curve for RF5111
* find out which one and place
- * in in pd_curves.
+ * in pd_curves.
* Note: ee_x_gain is reversed here */
for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 64a27e73d02e..bc90503f4b7a 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -34,8 +34,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
/*5210 has different led mode handling*/
u32 led_5210;
- ATH5K_TRACE(ah->ah_sc);
-
/*Reset led status*/
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
@@ -82,7 +80,6 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
*/
int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
- ATH5K_TRACE(ah->ah_sc);
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -98,7 +95,6 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
*/
int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
- ATH5K_TRACE(ah->ah_sc);
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -114,7 +110,6 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
*/
u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
- ATH5K_TRACE(ah->ah_sc);
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@@ -129,7 +124,6 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
- ATH5K_TRACE(ah->ah_sc);
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -153,7 +147,6 @@ void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
{
u32 data;
- ATH5K_TRACE(ah->ah_sc);
if (gpio >= AR5K_NUM_GPIO)
return;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 5212e275f1c7..86fdb6ddfaaa 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -59,8 +59,6 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
beacon_reg = 0;
- ATH5K_TRACE(ah->ah_sc);
-
switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
@@ -173,7 +171,6 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
*/
static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
- ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
return -EINVAL;
@@ -192,7 +189,6 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
*/
static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
- ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
return -EINVAL;
@@ -297,7 +293,6 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
u32 low_id, high_id;
u32 pcu_reg;
- ATH5K_TRACE(ah->ah_sc);
/* Set new station ID */
memcpy(common->macaddr, mac, ETH_ALEN);
@@ -357,7 +352,6 @@ void ath5k_hw_set_associd(struct ath5k_hw *ah)
void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
- ATH5K_TRACE(ah->ah_sc);
/* Cache bssid mask so that we can restore it
* on reset */
@@ -382,7 +376,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
@@ -397,7 +390,6 @@ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
*/
void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
@@ -406,8 +398,6 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
*/
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
- ATH5K_TRACE(ah->ah_sc);
- /* Set the multicat filter */
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
@@ -427,7 +417,6 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
- ATH5K_TRACE(ah->ah_sc);
filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
/*Radar detection for 5212*/
@@ -457,8 +446,6 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
- ATH5K_TRACE(ah->ah_sc);
-
/* Set PHY error filter register on 5212*/
if (ah->ah_version == AR5K_AR5212) {
if (filter & AR5K_RX_FILTER_RADARERR)
@@ -533,8 +520,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
WARN_ON( i == ATH5K_MAX_TSF_READ );
- ATH5K_TRACE(ah->ah_sc);
-
return (((u64)tsf_upper1 << 32) | tsf_lower);
}
@@ -548,8 +533,6 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
*/
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
- ATH5K_TRACE(ah->ah_sc);
-
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
@@ -565,8 +548,6 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
- ATH5K_TRACE(ah->ah_sc);
-
val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
/*
@@ -586,7 +567,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
- ATH5K_TRACE(ah->ah_sc);
/*
* Set the additional timers by mode
*/
@@ -674,7 +654,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
unsigned int i, type;
u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
@@ -749,8 +728,6 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
bool is_tkip;
const u8 *key_ptr;
- ATH5K_TRACE(ah->ah_sc);
-
is_tkip = (key->alg == ALG_TKIP);
/*
@@ -836,7 +813,6 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
{
u32 low_id, high_id;
- ATH5K_TRACE(ah->ah_sc);
/* Invalid entry (key table overflow) */
AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 492cbb15720d..73c4fcd142bb 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -378,8 +378,6 @@ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- ATH5K_TRACE(ah->ah_sc);
-
if (ah->ah_rf_banks == NULL ||
ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
return AR5K_RFGAIN_INACTIVE;
@@ -1167,7 +1165,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
* The median of the values in the history is then loaded into the
* hardware for its own use for RSSI and CCA measurements.
*/
-static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1248,7 +1246,6 @@ static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
/*
* Perform a PHY calibration on RF5110
* -Fix BPSK/QAM Constellation (I/Q correction)
- * -Calculate Noise Floor
*/
static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -1335,8 +1332,6 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
return ret;
}
- ath5k_hw_update_noise_floor(ah);
-
/*
* Re-enable RX/TX and beacons
*/
@@ -1348,22 +1343,20 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
}
/*
- * Perform a PHY calibration on RF5111/5112 and newer chips
+ * Perform I/Q calibration on RF5111/5112 and newer chips
*/
-static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static int
+ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
{
u32 i_pwr, q_pwr;
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
- ATH5K_TRACE(ah->ah_sc);
if (!ah->ah_calibration ||
ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
- goto done;
+ return 0;
/* Calibration has finished, get the results and re-run */
-
/* work around empty results which can apparently happen on 5212 */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
@@ -1384,7 +1377,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
/* protect against divide by 0 and loss of sign bits */
if (i_coffd == 0 || q_coffd < 2)
- goto done;
+ return -1;
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1410,17 +1403,6 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
-done:
-
- /* TODO: Separate noise floor calibration from I/Q calibration
- * since noise floor calibration interrupts rx path while I/Q
- * calibration doesn't. We don't need to run noise floor calibration
- * as often as I/Q calibration.*/
- ath5k_hw_update_noise_floor(ah);
-
- /* Initiate a gain_F calibration */
- ath5k_hw_request_rfgain_probe(ah);
-
return 0;
}
@@ -1434,8 +1416,10 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
if (ah->ah_radio == AR5K_RF5110)
ret = ath5k_hw_rf5110_calibrate(ah, channel);
- else
- ret = ath5k_hw_rf511x_calibrate(ah, channel);
+ else {
+ ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ ath5k_hw_request_rfgain_probe(ah);
+ }
return ret;
}
@@ -1693,7 +1677,6 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
/*Just a try M.F.*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
@@ -1709,8 +1692,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
u32 srev;
u16 ret;
- ATH5K_TRACE(ah->ah_sc);
-
/*
* Set the radio chip access register
*/
@@ -1755,8 +1736,6 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
static void /*TODO:Boundary check*/
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
- ATH5K_TRACE(ah->ah_sc);
-
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
@@ -1789,19 +1768,50 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
if (enable) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
- AR5K_PHY_RESTART_DIV_GC, 0xc);
+ AR5K_PHY_RESTART_DIV_GC, 1);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
AR5K_PHY_FAST_ANT_DIV_EN);
} else {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
- AR5K_PHY_RESTART_DIV_GC, 0x8);
+ AR5K_PHY_RESTART_DIV_GC, 0);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
AR5K_PHY_FAST_ANT_DIV_EN);
}
}
+void
+ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
+{
+ u8 ant0, ant1;
+
+ /*
+ * In case a fixed antenna was set as default
+ * use the same switch table twice.
+ */
+ if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
+ ant0 = ant1 = AR5K_ANT_SWTABLE_A;
+ else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
+ ant0 = ant1 = AR5K_ANT_SWTABLE_B;
+ else {
+ ant0 = AR5K_ANT_SWTABLE_A;
+ ant1 = AR5K_ANT_SWTABLE_B;
+ }
+
+ /* Set antenna idle switch table */
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
+ AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
+ (ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] |
+ AR5K_PHY_ANT_CTL_TXRX_EN));
+
+ /* Set antenna switch tables */
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0],
+ AR5K_PHY_ANT_SWITCH_TABLE_0);
+ ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1],
+ AR5K_PHY_ANT_SWITCH_TABLE_1);
+}
+
/*
* Set antenna operating mode
*/
@@ -1823,8 +1833,6 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
def_ant = ah->ah_def_ant;
- ATH5K_TRACE(ah->ah_sc);
-
switch (channel->hw_value & CHANNEL_MODES) {
case CHANNEL_A:
case CHANNEL_T:
@@ -1923,6 +1931,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
if (sta_id1)
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
+ ath5k_hw_set_antenna_switch(ah, ee_mode);
/* Note: set diversity before default antenna
* because it won't work correctly */
ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
@@ -2988,7 +2997,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 type;
int ret;
- ATH5K_TRACE(ah->ah_sc);
if (txpower > AR5K_TUNE_MAX_TXPOWER) {
ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
return -EINVAL;
@@ -3084,8 +3092,6 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
struct ieee80211_channel *channel = ah->ah_current_channel;
u8 ee_mode;
- ATH5K_TRACE(ah->ah_sc);
-
switch (channel->hw_value & CHANNEL_MODES) {
case CHANNEL_A:
case CHANNEL_T:
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index f5831da33f7b..4186ff4c6e9c 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -31,7 +31,6 @@ Queue Control Unit, DFS Control Unit Functions
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
- ATH5K_TRACE(ah->ah_sc);
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
@@ -42,7 +41,6 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *queue_info)
{
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
@@ -69,8 +67,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
unsigned int queue;
int ret;
- ATH5K_TRACE(ah->ah_sc);
-
/*
* Get queue by type
*/
@@ -149,7 +145,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
@@ -177,7 +172,6 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
*/
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
- ATH5K_TRACE(ah->ah_sc);
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@@ -195,7 +189,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
u32 cw_min, cw_max, retry_lg, retry_sh;
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
tq = &ah->ah_txq[queue];
@@ -523,8 +516,6 @@ int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
- ATH5K_TRACE(ah->ah_sc);
-
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 307f80e83f94..498aa28ea9e6 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -201,8 +201,6 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
int ret;
u32 mask = val ? val : ~0U;
- ATH5K_TRACE(ah->ah_sc);
-
/* Read-and-clear RX Descriptor Pointer*/
ath5k_hw_reg_read(ah, AR5K_RXDP);
@@ -246,7 +244,6 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
unsigned int i;
u32 staid, data;
- ATH5K_TRACE(ah->ah_sc);
staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
switch (mode) {
@@ -393,8 +390,6 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
mode = 0;
clock = 0;
- ATH5K_TRACE(ah->ah_sc);
-
/* Wakeup the device */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
@@ -734,7 +729,7 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, u8 *ant, u8 ee_mode)
+ struct ieee80211_channel *channel, u8 ee_mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
s16 cck_ofdm_pwr_delta;
@@ -768,17 +763,9 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
ee->ee_cck_ofdm_gain_delta;
}
- /* Set antenna idle switch table */
- AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
- AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
- (ah->ah_ant_ctl[ee_mode][0] |
- AR5K_PHY_ANT_CTL_TXRX_EN));
-
- /* Set antenna switch tables */
- ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[0]],
- AR5K_PHY_ANT_SWITCH_TABLE_0);
- ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant[1]],
- AR5K_PHY_ANT_SWITCH_TABLE_1);
+ /* XXX: necessary here? is called from ath5k_hw_set_antenna_mode()
+ * too */
+ ath5k_hw_set_antenna_switch(ah, ee_mode);
/* Noise floor threshold */
ath5k_hw_reg_write(ah,
@@ -855,7 +842,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
AR5K_PHY_NF_THRESH62,
ee->ee_thr_62[ee_mode]);
-
/* False detect backoff for channels
* that have spur noise. Write the new
* cyclic power RSSI threshold. */
@@ -891,14 +877,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool change_channel)
{
struct ath_common *common = ath5k_hw_common(ah);
- u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
+ u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo;
u32 phy_tst1;
- u8 mode, freq, ee_mode, ant[2];
+ u8 mode, freq, ee_mode;
int i, ret;
- ATH5K_TRACE(ah->ah_sc);
-
- s_ant = 0;
ee_mode = 0;
staid1_flags = 0;
tsf_up = 0;
@@ -995,9 +978,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
}
}
- /* Save default antenna */
- s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
-
if (ah->ah_version == AR5K_AR5212) {
/* Restore normal 32/40MHz clock operation
* to avoid register access delay on certain
@@ -1094,22 +1074,17 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Write OFDM timings on 5212*/
if (ah->ah_version == AR5K_AR5212 &&
channel->hw_value & CHANNEL_OFDM) {
- struct ath5k_eeprom_info *ee =
- &ah->ah_capabilities.cap_eeprom;
ret = ath5k_hw_write_ofdm_timings(ah, channel);
if (ret)
return ret;
- /* Note: According to docs we can have a newer
- * EEPROM on old hardware, so we need to verify
- * that our hardware is new enough to have spur
- * mitigation registers (delta phase etc) */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424 ||
- (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
- ee->ee_version >= AR5K_EEPROM_VERSION_5_3))
+ /* Spur info is available only from EEPROM versions
+ * newer than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
+ channel);
}
/*Enable/disable 802.11b mode on 5111
@@ -1123,21 +1098,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
AR5K_TXCFG_B_MODE);
}
- /*
- * In case a fixed antenna was set as default
- * use the same switch table twice.
- */
- if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
- ant[0] = ant[1] = AR5K_ANT_SWTABLE_A;
- else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
- ant[0] = ant[1] = AR5K_ANT_SWTABLE_B;
- else {
- ant[0] = AR5K_ANT_SWTABLE_A;
- ant[1] = AR5K_ANT_SWTABLE_B;
- }
-
/* Commit values from EEPROM */
- ath5k_hw_commit_eeprom_settings(ah, channel, ant, ee_mode);
+ ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode);
} else {
/*
@@ -1175,8 +1137,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
}
}
-
- ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
}
/* Ledstate */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
new file mode 100644
index 000000000000..90757de7bf59
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -0,0 +1,116 @@
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "base.h"
+#include "ath5k.h"
+#include "reg.h"
+
+#define SIMPLE_SHOW_STORE(name, get, set) \
+static ssize_t ath5k_attr_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+} \
+ \
+static ssize_t ath5k_attr_store_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ int val; \
+ \
+ val = (int)simple_strtoul(buf, NULL, 10); \
+ set(sc->ah, val); \
+ return count; \
+} \
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
+ ath5k_attr_show_##name, ath5k_attr_store_##name)
+
+#define SIMPLE_SHOW(name, get) \
+static ssize_t ath5k_attr_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
+
+/*** ANI ***/
+
+SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
+SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
+ ath5k_ani_set_noise_immunity_level);
+SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
+ ath5k_ani_set_spur_immunity_level);
+SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
+ ath5k_ani_set_firstep_level);
+SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
+ ath5k_ani_set_ofdm_weak_signal_detection);
+SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
+ ath5k_ani_set_cck_weak_signal_detection);
+SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
+
+static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
+}
+static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO,
+ ath5k_attr_show_noise_immunity_level_max, NULL);
+
+static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
+}
+static DEVICE_ATTR(firstep_level_max, S_IRUGO,
+ ath5k_attr_show_firstep_level_max, NULL);
+
+static struct attribute *ath5k_sysfs_entries_ani[] = {
+ &dev_attr_ani_mode.attr,
+ &dev_attr_noise_immunity_level.attr,
+ &dev_attr_spur_level.attr,
+ &dev_attr_firstep_level.attr,
+ &dev_attr_ofdm_weak_signal_detection.attr,
+ &dev_attr_cck_weak_signal_detection.attr,
+ &dev_attr_noise_immunity_level_max.attr,
+ &dev_attr_spur_level_max.attr,
+ &dev_attr_firstep_level_max.attr,
+ NULL
+};
+
+static struct attribute_group ath5k_attribute_group_ani = {
+ .name = "ani",
+ .attrs = ath5k_sysfs_entries_ani,
+};
+
+
+/*** register / unregister ***/
+
+int
+ath5k_sysfs_register(struct ath5k_softc *sc)
+{
+ struct device *dev = &sc->pdev->dev;
+ int err;
+
+ err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
+ if (err) {
+ ATH5K_ERR(sc, "failed to create sysfs group\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void
+ath5k_sysfs_unregister(struct ath5k_softc *sc)
+{
+ struct device *dev = &sc->pdev->dev;
+
+ sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
+}
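
Once registered, the group appears as an "ani" subdirectory under the PCI device's sysfs node, with one file per attribute defined above. A hedged user-space sketch of reading one knob; the device path is purely illustrative, since the real one depends on the card's PCI address:

#include <stdio.h>

int main(void)
{
	/* hypothetical path; substitute the card's actual PCI address */
	const char *path = "/sys/bus/pci/devices/0000:02:00.0/ani/ani_mode";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("ani_mode = %s", buf);	/* integer value, as printed by the show handler */
	fclose(f);
	return 0;
}
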
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index dd112be218ab..973ae4f49f35 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -32,7 +32,8 @@ ath9k_hw-y:= \
mac.o \
ar9002_mac.o \
ar9003_mac.o \
- ar9003_eeprom.o
+ ar9003_eeprom.o \
+ ar9003_paprd.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ba8b20f01594..cc648b6ae31c 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +17,99 @@
#include "hw.h"
#include "hw-ops.h"
-static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
- struct ath9k_channel *chan)
+struct ani_ofdm_level_entry {
+ int spur_immunity_level;
+ int fir_step_level;
+ int ofdm_weak_signal_on;
+};
+
+/* values here are relative to the INI */
+
+/*
+ * Legend:
+ *
+ * SI: Spur immunity
+ * FS: FIR Step
+ * WS: OFDM / CCK Weak Signal detection
+ * MRC-CCK: Maximal Ratio Combining for CCK
+ */
+
+static const struct ani_ofdm_level_entry ofdm_level_table[] = {
+ /* SI FS WS */
+ { 0, 0, 1 }, /* lvl 0 */
+ { 1, 1, 1 }, /* lvl 1 */
+ { 2, 2, 1 }, /* lvl 2 */
+ { 3, 2, 1 }, /* lvl 3 (default) */
+ { 4, 3, 1 }, /* lvl 4 */
+ { 5, 4, 1 }, /* lvl 5 */
+ { 6, 5, 1 }, /* lvl 6 */
+ { 7, 6, 1 }, /* lvl 7 */
+ { 7, 7, 1 }, /* lvl 8 */
+ { 7, 8, 0 } /* lvl 9 */
+};
+#define ATH9K_ANI_OFDM_NUM_LEVEL \
+ (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0]))
+#define ATH9K_ANI_OFDM_MAX_LEVEL \
+ (ATH9K_ANI_OFDM_NUM_LEVEL-1)
+#define ATH9K_ANI_OFDM_DEF_LEVEL \
+ 3 /* default level - matches the INI settings */
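
Each OFDM noise-immunity level is only an index into ofdm_level_table[]; moving between levels means applying that row's spur-immunity, FIR-step and weak-signal settings, which ath9k_hw_set_ofdm_nil() further below does against the hardware. A minimal lookup sketch with the first rows of the table copied locally for illustration:

#include <stdio.h>

struct ofdm_level {
	int spur_immunity_level;
	int fir_step_level;
	int ofdm_weak_signal_on;
};

static const struct ofdm_level levels[] = {
	{ 0, 0, 1 },	/* lvl 0 */
	{ 1, 1, 1 },	/* lvl 1 */
	{ 2, 2, 1 },	/* lvl 2 */
	{ 3, 2, 1 },	/* lvl 3 (default) */
};

int main(void)
{
	const struct ofdm_level *e = &levels[3];	/* ATH9K_ANI_OFDM_DEF_LEVEL */

	printf("SI=%d FS=%d WS=%d\n", e->spur_immunity_level,
	       e->fir_step_level, e->ofdm_weak_signal_on);	/* SI=3 FS=2 WS=1 */
	return 0;
}
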
+
+/*
+ * MRC (Maximal Ratio Combining) has always been used with multi-antenna OFDM.
+ * With OFDM for single stream you just add up all antenna inputs, you're
+ * only interested in what you get after FFT. Signal alignment is also not
+ * required for OFDM because any phase difference adds up in the frequency
+ * domain.
+ *
+ * MRC requires extra work for use with CCK. You need to align the antenna
+ * signals from the different antennas before you can add the signals together.
+ * You need alignment of signals as CCK is in time domain, so addition can cancel
+ * your signal completely if phase is 180 degrees (think of adding sine waves).
+ * You also need to remove noise before the addition and this is where ANI
+ * MRC CCK comes into play. One of the antenna inputs may be stronger but
+ * lower SNR, so just adding after alignment can be dangerous.
+ *
+ * Regardless of alignment in time, the antenna signals add constructively after
+ * FFT and improve your reception. For more information:
+ *
+ * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ */
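
The phase point in the comment is plain destructive interference: two equal-amplitude copies of the same waveform arriving 180 degrees apart sum to zero unless one is re-aligned first. A tiny numeric illustration, unrelated to the driver code (compile with -lm):

#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
	double sum_misaligned = 0.0, sum_aligned = 0.0;
	int n;

	for (n = 0; n < 64; n++) {
		double a = sin(2 * M_PI * n / 16.0);		/* antenna 0 */
		double b = sin(2 * M_PI * n / 16.0 + M_PI);	/* antenna 1, 180 degrees off */

		sum_misaligned += fabs(a + b);	/* cancels to ~0 */
		sum_aligned    += fabs(a - b);	/* re-aligned before adding: clearly non-zero */
	}
	printf("misaligned %.3f, aligned %.3f\n", sum_misaligned, sum_aligned);
	return 0;
}
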
+
+struct ani_cck_level_entry {
+ int fir_step_level;
+ int mrc_cck_on;
+};
+
+static const struct ani_cck_level_entry cck_level_table[] = {
+ /* FS MRC-CCK */
+ { 0, 1 }, /* lvl 0 */
+ { 1, 1 }, /* lvl 1 */
+ { 2, 1 }, /* lvl 2 (default) */
+ { 3, 1 }, /* lvl 3 */
+ { 4, 0 }, /* lvl 4 */
+ { 5, 0 }, /* lvl 5 */
+ { 6, 0 }, /* lvl 6 */
+ { 7, 0 }, /* lvl 7 (only for high rssi) */
+ { 8, 0 } /* lvl 8 (only for high rssi) */
+};
+
+#define ATH9K_ANI_CCK_NUM_LEVEL \
+ (sizeof(cck_level_table)/sizeof(cck_level_table[0]))
+#define ATH9K_ANI_CCK_MAX_LEVEL \
+ (ATH9K_ANI_CCK_NUM_LEVEL-1)
+#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
+ (ATH9K_ANI_CCK_NUM_LEVEL-3)
+#define ATH9K_ANI_CCK_DEF_LEVEL \
+ 2 /* default level - matches the INI settings */
+
+/* Private to ani.c */
+static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->ani_lower_immunity(ah);
+}
+
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
int i;
@@ -48,7 +139,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
stats->beacons += REG_READ(ah, AR_BEACON_CNT);
}
-static void ath9k_ani_restart(struct ath_hw *ah)
+static void ath9k_ani_restart_old(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
struct ath_common *common = ath9k_hw_common(ah);
@@ -96,7 +187,42 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState->cckPhyErrCount = 0;
}
-static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
+static void ath9k_ani_restart_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+ aniState->listenTime = 0;
+
+ aniState->ofdmPhyErrBase = 0;
+ aniState->cckPhyErrBase = 0;
+
+ ath_print(common, ATH_DBG_ANI,
+ "Writing ofdmbase=%08x cckbase=%08x\n",
+ aniState->ofdmPhyErrBase,
+ aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ aniState->ofdmPhyErrCount = 0;
+ aniState->cckPhyErrCount = 0;
+}
+
+static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ar5416AniState *aniState;
@@ -168,7 +294,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
}
}
-static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
+static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ar5416AniState *aniState;
@@ -206,7 +332,125 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
}
}
-static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
+/* Adjust the OFDM Noise Immunity Level */
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ aniState->noiseFloor = BEACON_RSSI(ah);
+
+ ath_print(common, ATH_DBG_ANI,
+ "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->ofdmNoiseImmunityLevel,
+ immunityLevel, aniState->noiseFloor,
+ aniState->rssiThrLow, aniState->rssiThrHigh);
+
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
+ entry_ofdm->spur_immunity_level);
+
+ if (aniState->firstepLevel != entry_ofdm->fir_step_level &&
+ entry_ofdm->fir_step_level >= entry_cck->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_ofdm->fir_step_level);
+
+ if ((ah->opmode != NL80211_IFTYPE_STATION &&
+ ah->opmode != NL80211_IFTYPE_ADHOC) ||
+ aniState->noiseFloor <= aniState->rssiThrHigh) {
+ if (aniState->ofdmWeakSigDetectOff)
+ /* force on ofdm weak sig detect */
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ true);
+ else if (aniState->ofdmWeakSigDetectOff ==
+ entry_ofdm->ofdm_weak_signal_on)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+ entry_ofdm->ofdm_weak_signal_on);
+ }
+}
+
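The helper above is table driven: it indexes ofdm_level_table[] and cck_level_table[] by noise-immunity level and calls ath9k_hw_ani_control() only for the parameters that differ from the current state. Those tables are added earlier in ani.c and are not part of this hunk, so the sketch below only illustrates the shape implied by the field accesses here; the member types and example values are assumptions, not the real table contents.

struct ani_ofdm_level_entry {
	int spur_immunity_level;	/* fed to ATH9K_ANI_SPUR_IMMUNITY_LEVEL */
	int fir_step_level;		/* fed to ATH9K_ANI_FIRSTEP_LEVEL */
	int ofdm_weak_signal_on;	/* fed to ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION */
};

/* hypothetical three-level table; level 0 = most sensitive, last = most immune */
static const struct ani_ofdm_level_entry example_ofdm_levels[] = {
	/* si  fs  weak-sig */
	{   0,  0,  1 },
	{   3,  2,  1 },
	{   7,  6,  0 },
};

Raising the OFDM level to N simply replays example_ofdm_levels[N]; the CCK table works the same way with a fir_step_level and an mrc_cck_on member.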
+static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+
+ if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
+}
+
+/*
+ * Set the ANI settings to match a CCK level.
+ */
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ aniState->noiseFloor = BEACON_RSSI(ah);
+ ath_print(common, ATH_DBG_ANI,
+ "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->cckNoiseImmunityLevel, immunityLevel,
+ aniState->noiseFloor, aniState->rssiThrLow,
+ aniState->rssiThrHigh);
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION ||
+ ah->opmode == NL80211_IFTYPE_ADHOC) &&
+ aniState->noiseFloor <= aniState->rssiThrLow &&
+ immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
+ immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
+
+ aniState->cckNoiseImmunityLevel = immunityLevel;
+
+ entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
+ entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
+
+ if (aniState->firstepLevel != entry_cck->fir_step_level &&
+ entry_cck->fir_step_level >= entry_ofdm->fir_step_level)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_FIRSTEP_LEVEL,
+ entry_cck->fir_step_level);
+
+ /* Skip MRC CCK for pre AR9003 families */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return;
+
+ if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
+ ath9k_hw_ani_control(ah,
+ ATH9K_ANI_MRC_CCK,
+ entry_cck->mrc_cck_on);
+}
+
+static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+
+ if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
+}
+
+static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
int32_t rssi;
@@ -259,9 +503,53 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
}
}
+/*
+ * only lower either OFDM or CCK errors per turn
+ * we lower the other one next time
+ */
+static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+
+ aniState = ah->curani;
+
+ /* lower OFDM noise immunity */
+ if (aniState->ofdmNoiseImmunityLevel > 0 &&
+ (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
+ return;
+ }
+
+ /* lower CCK noise immunity */
+ if (aniState->cckNoiseImmunityLevel > 0)
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
+}
+
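A hedged trace of the one-at-a-time lowering above, with hypothetical starting levels (the ofdmsTurn flag is flipped by the monitor path further down, not by this helper): starting from ofdm=3, cck=2, ofdmsTurn=true, the first quiet period lowers OFDM to 2, the next lowers CCK to 1, the next lowers OFDM to 1, and so on; once one side reaches 0, the other is lowered on every call.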
+static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
+{
+ struct ath9k_channel *chan = ah->curchan;
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ u8 clockrate; /* in MHz */
+
+ if (!ah->curchan) /* should really check for CCK instead */
+ clockrate = ATH9K_CLOCK_RATE_CCK;
+ else if (conf->channel->band == IEEE80211_BAND_2GHZ)
+ clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
+ else if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (conf_is_ht40(conf))
+ return clockrate * 2;
+
+ return clockrate;
+}
+
static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
u32 txFrameCount, rxFrameCount, cycleCount;
int32_t listenTime;
@@ -271,15 +559,31 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
aniState = ah->curani;
if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
-
listenTime = 0;
ah->stats.ast_ani_lzero++;
+ ath_print(common, ATH_DBG_ANI,
+ "1st call: aniState->cycleCount=%d\n",
+ aniState->cycleCount);
} else {
int32_t ccdelta = cycleCount - aniState->cycleCount;
int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
int32_t tfdelta = txFrameCount - aniState->txFrameCount;
- listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
+ int32_t clock_rate;
+
+ /*
+ * convert HW counter values to ms using mode
+ * specific clock rate
+ */
+ clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000;
+
+ listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate;
+
+ ath_print(common, ATH_DBG_ANI,
+ "cyclecount=%d, rfcount=%d, "
+ "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n",
+ ccdelta, rfdelta, tfdelta, listenTime, clock_rate);
}
+
aniState->cycleCount = cycleCount;
aniState->txFrameCount = txFrameCount;
aniState->rxFrameCount = rxFrameCount;
@@ -287,7 +591,7 @@ static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
return listenTime;
}
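A worked example of the conversion above, with made-up counter deltas: clock_rate is in cycles per millisecond, so on a channel whose MAC clock is 44 MHz (the rate the old hard-coded divisor of 44000 corresponds to) running HT40, ath9k_hw_chan_2_clockrate_mhz() returns 88 and clock_rate = 88000; deltas of ccdelta = 9,680,000, rfdelta = 700,000 and tfdelta = 180,000 then give listenTime = (9,680,000 - 700,000 - 180,000) / 88000 = 100 ms.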
-void ath9k_ani_reset(struct ath_hw *ah)
+static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
{
struct ar5416AniState *aniState;
struct ath9k_channel *chan = ah->curchan;
@@ -340,7 +644,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
ah->curani->cckTrigLow =
ah->config.cck_trig_low;
}
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
return;
}
@@ -362,7 +666,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
ENABLE_REGWRITE_BUFFER(ah);
@@ -373,8 +677,102 @@ void ath9k_ani_reset(struct ath_hw *ah)
DISABLE_REGWRITE_BUFFER(ah);
}
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
- struct ath9k_channel *chan)
+/*
+ * Restore the ANI parameters in the HAL and reset the statistics.
+ * This routine should be called for every hardware reset and for
+ * every channel change.
+ */
+static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!DO_ANI(ah))
+ return;
+
+ BUG_ON(aniState == NULL);
+ ah->stats.ast_ani_reset++;
+
+ /* only allow a subset of functions in AP mode */
+ if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
+ ATH9K_ANI_FIRSTEP_LEVEL);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function |= ATH9K_ANI_MRC_CCK;
+ } else
+ ah->ani_function = 0;
+ }
+
+ /* always allow mode (on/off) to be controlled */
+ ah->ani_function |= ATH9K_ANI_MODE;
+
+ if (is_scanning ||
+ (ah->opmode != NL80211_IFTYPE_STATION &&
+ ah->opmode != NL80211_IFTYPE_ADHOC)) {
+ /*
+ * If we're scanning or in AP mode, the defaults (ini)
+ * should be in place. For an AP we assume the historical
+ * levels for this channel are probably outdated so start
+ * from defaults instead.
+ */
+ if (aniState->ofdmNoiseImmunityLevel !=
+ ATH9K_ANI_OFDM_DEF_LEVEL ||
+ aniState->cckNoiseImmunityLevel !=
+ ATH9K_ANI_CCK_DEF_LEVEL) {
+ ath_print(common, ATH_DBG_ANI,
+ "Restore defaults: opmode %u "
+ "chan %d MHz/0x%x is_scanning=%d "
+ "ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+
+ ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
+ ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
+ }
+ } else {
+ /*
+ * restore historical levels for this channel
+ */
+ ath_print(common, ATH_DBG_ANI,
+ "Restore history: opmode %u "
+ "chan %d MHz/0x%x is_scanning=%d "
+ "ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
+
+ ath9k_hw_set_ofdm_nil(ah,
+ aniState->ofdmNoiseImmunityLevel);
+ ath9k_hw_set_cck_nil(ah,
+ aniState->cckNoiseImmunityLevel);
+ }
+
+ /*
+ * enable phy counters if the hw supports them, or if not,
+ * enable phy interrupts (so we can count each one)
+ */
+ ath9k_ani_restart_new(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static void ath9k_hw_ani_monitor_old(struct ath_hw *ah,
+ struct ath9k_channel *chan)
{
struct ar5416AniState *aniState;
struct ath_common *common = ath9k_hw_common(ah);
@@ -390,7 +788,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
listenTime = ath9k_hw_ani_get_listen_time(ah);
if (listenTime < 0) {
ah->stats.ast_ani_lneg++;
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
return;
}
@@ -444,21 +842,166 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
aniState->cckPhyErrCount <= aniState->listenTime *
aniState->cckTrigLow / 1000)
ath9k_hw_ani_lower_immunity(ah);
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
} else if (aniState->listenTime > ah->aniperiod) {
if (aniState->ofdmPhyErrCount > aniState->listenTime *
aniState->ofdmTrigHigh / 1000) {
- ath9k_hw_ani_ofdm_err_trigger(ah);
- ath9k_ani_restart(ah);
+ ath9k_hw_ani_ofdm_err_trigger_old(ah);
+ ath9k_ani_restart_old(ah);
} else if (aniState->cckPhyErrCount >
aniState->listenTime * aniState->cckTrigHigh /
1000) {
- ath9k_hw_ani_cck_err_trigger(ah);
- ath9k_ani_restart(ah);
+ ath9k_hw_ani_cck_err_trigger_old(ah);
+ ath9k_ani_restart_old(ah);
+ }
+ }
+}
+
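For the count-based check kept in the old path above, the threshold scales with the listen window: with listenTime = 200 ms and ofdmTrigHigh = 500 (errors per second), the OFDM trigger fires once more than 200 * 500 / 1000 = 100 errors accumulate in that window; the lower-immunity comparison works the same way against ofdmTrigLow and cckTrigLow.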
+static void ath9k_hw_ani_monitor_new(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int32_t listenTime;
+ u32 phyCnt1, phyCnt2;
+ u32 ofdmPhyErrCnt, cckPhyErrCnt;
+ u32 ofdmPhyErrRate, cckPhyErrRate;
+
+ if (!DO_ANI(ah))
+ return;
+
+ aniState = ah->curani;
+ if (WARN_ON(!aniState))
+ return;
+
+ listenTime = ath9k_hw_ani_get_listen_time(ah);
+ if (listenTime <= 0) {
+ ah->stats.ast_ani_lneg++;
+ /* restart ANI period if listenTime is invalid */
+ ath_print(common, ATH_DBG_ANI,
+ "listenTime=%d - on new ani monitor\n",
+ listenTime);
+ ath9k_ani_restart_new(ah);
+ return;
+ }
+
+ aniState->listenTime += listenTime;
+
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ if (phyCnt1 < aniState->ofdmPhyErrBase ||
+ phyCnt2 < aniState->cckPhyErrBase) {
+ if (phyCnt1 < aniState->ofdmPhyErrBase) {
+ ath_print(common, ATH_DBG_ANI,
+ "phyCnt1 0x%x, resetting "
+ "counter value to 0x%x\n",
+ phyCnt1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_1,
+ aniState->ofdmPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_1,
+ AR_PHY_ERR_OFDM_TIMING);
+ }
+ if (phyCnt2 < aniState->cckPhyErrBase) {
+ ath_print(common, ATH_DBG_ANI,
+ "phyCnt2 0x%x, resetting "
+ "counter value to 0x%x\n",
+ phyCnt2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_2,
+ aniState->cckPhyErrBase);
+ REG_WRITE(ah, AR_PHY_ERR_MASK_2,
+ AR_PHY_ERR_CCK_TIMING);
+ }
+ return;
+ }
+
+ ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
+ ah->stats.ast_ani_ofdmerrs +=
+ ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+
+ cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
+ ah->stats.ast_ani_cckerrs +=
+ cckPhyErrCnt - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = cckPhyErrCnt;
+
+ ath_print(common, ATH_DBG_ANI,
+ "Errors: OFDM=0x%08x-0x%08x=%d "
+ "CCK=0x%08x-0x%08x=%d\n",
+ phyCnt1,
+ aniState->ofdmPhyErrBase,
+ ofdmPhyErrCnt,
+ phyCnt2,
+ aniState->cckPhyErrBase,
+ cckPhyErrCnt);
+
+ ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
+ aniState->listenTime;
+ cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
+ aniState->listenTime;
+
+ ath_print(common, ATH_DBG_ANI,
+ "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
+ "errs=%d/s ofdm_turn=%d\n",
+ listenTime, aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate, aniState->ofdmsTurn);
+
+ if (aniState->listenTime > 5 * ah->aniperiod) {
+ if (ofdmPhyErrRate <= aniState->ofdmTrigLow &&
+ cckPhyErrRate <= aniState->cckTrigLow) {
+ ath_print(common, ATH_DBG_ANI,
+ "1. listenTime=%d OFDM:%d errs=%d/s(<%d) "
+ "CCK:%d errs=%d/s(<%d) -> "
+ "ath9k_hw_ani_lower_immunity()\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate,
+ aniState->ofdmTrigLow,
+ aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate,
+ aniState->cckTrigLow);
+ ath9k_hw_ani_lower_immunity(ah);
+ aniState->ofdmsTurn = !aniState->ofdmsTurn;
+ }
+ ath_print(common, ATH_DBG_ANI,
+ "1 listenTime=%d ofdm=%d/s cck=%d/s - "
+ "calling ath9k_ani_restart_new()\n",
+ aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate);
+ ath9k_ani_restart_new(ah);
+ } else if (aniState->listenTime > ah->aniperiod) {
+ /* check to see if need to raise immunity */
+ if (ofdmPhyErrRate > aniState->ofdmTrigHigh &&
+ (cckPhyErrRate <= aniState->cckTrigHigh ||
+ aniState->ofdmsTurn)) {
+ ath_print(common, ATH_DBG_ANI,
+ "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> "
+ "ath9k_hw_ani_ofdm_err_trigger_new()\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate,
+ aniState->ofdmTrigHigh);
+ ath9k_hw_ani_ofdm_err_trigger_new(ah);
+ ath9k_ani_restart_new(ah);
+ aniState->ofdmsTurn = false;
+ } else if (cckPhyErrRate > aniState->cckTrigHigh) {
+ ath_print(common, ATH_DBG_ANI,
+ "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> "
+ "ath9k_hw_ani_cck_err_trigger_new()\n",
+ aniState->listenTime,
+ aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate,
+ aniState->cckTrigHigh);
+ ath9k_hw_ani_cck_err_trigger_new(ah);
+ ath9k_ani_restart_new(ah);
+ aniState->ofdmsTurn = true;
}
}
}
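The new path compares rates rather than raw counts: ofdmPhyErrRate = ofdmPhyErrCount * 1000 / listenTime is errors per second over the accumulated window. As a worked example with a hypothetical 2000 ms window, 2600 OFDM errors give 1300 errs/s, above ATH9K_ANI_OFDM_TRIG_HIGH_NEW (1000), so the OFDM immunity level is raised; 500 errors give 250 errs/s, below ATH9K_ANI_OFDM_TRIG_LOW_NEW (400), so once both the OFDM and CCK rates are under their low thresholds and listenTime has passed 5 * ah->aniperiod, immunity is lowered, alternating between OFDM and CCK via ofdmsTurn.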
-EXPORT_SYMBOL(ath9k_hw_ani_monitor);
void ath9k_enable_mib_counters(struct ath_hw *ah)
{
@@ -495,6 +1038,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
REG_WRITE(ah, AR_FILT_OFDM, 0);
REG_WRITE(ah, AR_FILT_CCK, 0);
}
+EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
u32 *rxc_pcnt,
@@ -542,7 +1086,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
* any of the MIB counters overflow/trigger so don't assume we're
* here because a PHY error counter triggered.
*/
-void ath9k_hw_procmibevent(struct ath_hw *ah)
+static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah)
{
u32 phyCnt1, phyCnt2;
@@ -555,8 +1099,15 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
/* Clear the mib counters and save them in the stats */
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
- if (!DO_ANI(ah))
+ if (!DO_ANI(ah)) {
+ /*
+ * We must always clear the interrupt cause by
+ * resetting the phy error regs.
+ */
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
return;
+ }
/* NB: these are not reset-on-read */
phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
@@ -584,14 +1135,51 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
* check will never be true.
*/
if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
- ath9k_hw_ani_ofdm_err_trigger(ah);
+ ath9k_hw_ani_ofdm_err_trigger_old(ah);
if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
- ath9k_hw_ani_cck_err_trigger(ah);
+ ath9k_hw_ani_cck_err_trigger_old(ah);
/* NB: always restart to insure the h/w counters are reset */
- ath9k_ani_restart(ah);
+ ath9k_ani_restart_old(ah);
}
}
-EXPORT_SYMBOL(ath9k_hw_procmibevent);
+
+/*
+ * Process a MIB interrupt. We may potentially be invoked because
+ * any of the MIB counters overflow/trigger so don't assume we're
+ * here because a PHY error counter triggered.
+ */
+static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah)
+{
+ u32 phyCnt1, phyCnt2;
+
+ /* Reset these counters regardless */
+ REG_WRITE(ah, AR_FILT_OFDM, 0);
+ REG_WRITE(ah, AR_FILT_CCK, 0);
+ if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
+ REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
+
+ /* Clear the mib counters and save them in the stats */
+ ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+
+ if (!DO_ANI(ah)) {
+ /*
+ * We must always clear the interrupt cause by
+ * resetting the phy error regs.
+ */
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
+ return;
+ }
+
+ /* NB: these are not reset-on-read */
+ phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
+ phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
+
+ /* NB: always restart to ensure the h/w counters are reset */
+ if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
+ ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK))
+ ath9k_ani_restart_new(ah);
+}
void ath9k_hw_ani_setup(struct ath_hw *ah)
{
@@ -619,22 +1207,70 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
memset(ah->ani, 0, sizeof(ah->ani));
for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
- ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
- ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
+ if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+ ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
+ ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
+
+ ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
+ ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW;
+
+ ah->ani[i].spurImmunityLevel =
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+
+ ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+
+ ah->ani[i].ofdmPhyErrBase = 0;
+ ah->ani[i].cckPhyErrBase = 0;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani[i].mrcCCKOff =
+ !ATH9K_ANI_ENABLE_MRC_CCK;
+ else
+ ah->ani[i].mrcCCKOff = true;
+
+ ah->ani[i].ofdmsTurn = true;
+ } else {
+ ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+
+ ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+
+ ah->ani[i].spurImmunityLevel =
+ ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
+ ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
+
+ ah->ani[i].ofdmPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+ ah->ani[i].cckPhyErrBase =
+ AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+ ah->ani[i].cckWeakSigThreshold =
+ ATH9K_ANI_CCK_WEAK_SIG_THR;
+ }
+
ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
ah->ani[i].ofdmWeakSigDetectOff =
!ATH9K_ANI_USE_OFDM_WEAK_SIG;
- ah->ani[i].cckWeakSigThreshold =
- ATH9K_ANI_CCK_WEAK_SIG_THR;
- ah->ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
- ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- ah->ani[i].ofdmPhyErrBase =
- AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
- ah->ani[i].cckPhyErrBase =
- AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+ }
+
+ /*
+ * since we expect some ongoing maintenance on the tables, sanity
+ * check here that the default levels do not modify the INI settings.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) {
+ const struct ani_ofdm_level_entry *entry_ofdm;
+ const struct ani_cck_level_entry *entry_cck;
+
+ entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL];
+ entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL];
+
+ ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
+ } else {
+ ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
}
ath_print(common, ATH_DBG_ANI,
@@ -653,7 +1289,34 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath9k_enable_mib_counters(ah);
- ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
+
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->ani_reset = ath9k_ani_reset_old;
+ priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old;
+
+ ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old;
+ ops->ani_monitor = ath9k_hw_ani_monitor_old;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n");
+}
+
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->ani_reset = ath9k_ani_reset_new;
+ priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new;
+
+ ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new;
+ ops->ani_monitor = ath9k_hw_ani_monitor_new;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n");
+}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 3356762ea384..f4d0a4d48b37 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -23,23 +23,55 @@
#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
-#define ATH9K_ANI_OFDM_TRIG_HIGH 500
-#define ATH9K_ANI_OFDM_TRIG_LOW 200
-#define ATH9K_ANI_CCK_TRIG_HIGH 200
-#define ATH9K_ANI_CCK_TRIG_LOW 100
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000
+
+/* units are errors per second */
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
+#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
+#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
+
+/* units are errors per second */
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
+#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
+
#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
#define ATH9K_ANI_CCK_WEAK_SIG_THR false
-#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
-#define ATH9K_ANI_FIRSTEP_LVL 0
+
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7
+#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
+
+#define ATH9K_ANI_FIRSTEP_LVL_OLD 0
+#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
+
#define ATH9K_ANI_RSSI_THR_HIGH 40
#define ATH9K_ANI_RSSI_THR_LOW 7
-#define ATH9K_ANI_PERIOD 100
+
+#define ATH9K_ANI_PERIOD_OLD 100
+#define ATH9K_ANI_PERIOD_NEW 1000
+
+/* in ms */
+#define ATH9K_ANI_POLLINTERVAL_OLD 100
+#define ATH9K_ANI_POLLINTERVAL_NEW 1000
#define HAL_NOISE_IMMUNE_MAX 4
#define HAL_SPUR_IMMUNE_MAX 7
#define HAL_FIRST_STEP_MAX 2
+#define ATH9K_SIG_FIRSTEP_SETTING_MIN 0
+#define ATH9K_SIG_FIRSTEP_SETTING_MAX 20
+#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
+#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
+
+#define ATH9K_ANI_ENABLE_MRC_CCK true
+
+/* values here are relative to the INI */
+
enum ath9k_ani_cmd {
ATH9K_ANI_PRESENT = 0x1,
ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
@@ -49,7 +81,8 @@ enum ath9k_ani_cmd {
ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
ATH9K_ANI_MODE = 0x40,
ATH9K_ANI_PHYERR_RESET = 0x80,
- ATH9K_ANI_ALL = 0xff
+ ATH9K_ANI_MRC_CCK = 0x100,
+ ATH9K_ANI_ALL = 0xfff
};
struct ath9k_mib_stats {
@@ -60,9 +93,31 @@ struct ath9k_mib_stats {
u32 beacons;
};
+/* INI default values for ANI registers */
+struct ath9k_ani_default {
+ u16 m1ThreshLow;
+ u16 m2ThreshLow;
+ u16 m1Thresh;
+ u16 m2Thresh;
+ u16 m2CountThr;
+ u16 m2CountThrLow;
+ u16 m1ThreshLowExt;
+ u16 m2ThreshLowExt;
+ u16 m1ThreshExt;
+ u16 m2ThreshExt;
+ u16 firstep;
+ u16 firstepLow;
+ u16 cycpwrThr1;
+ u16 cycpwrThr1Ext;
+};
+
struct ar5416AniState {
struct ath9k_channel *c;
u8 noiseImmunityLevel;
+ u8 ofdmNoiseImmunityLevel;
+ u8 cckNoiseImmunityLevel;
+ bool ofdmsTurn;
+ u8 mrcCCKOff;
u8 spurImmunityLevel;
u8 firstepLevel;
u8 ofdmWeakSigDetectOff;
@@ -85,6 +140,7 @@ struct ar5416AniState {
int16_t pktRssi[2];
int16_t ofdmErrRssi[2];
int16_t cckErrRssi[2];
+ struct ath9k_ani_default iniDef;
};
struct ar5416Stats {
@@ -108,15 +164,13 @@ struct ar5416Stats {
};
#define ah_mibStats stats.ast_mibstats
-void ath9k_ani_reset(struct ath_hw *ah);
-void ath9k_hw_ani_monitor(struct ath_hw *ah,
- struct ath9k_channel *chan);
void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
u32 *rxf_pcnt, u32 *txf_pcnt);
-void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
+int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
+ struct ath9k_channel *chan);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index b2c17c98bb38..ee34a495b0be 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -19,7 +19,30 @@
#include "../regd.h"
#include "ar9002_phy.h"
-/* All code below is for non single-chip solutions */
+/* All code below is for AR5008, AR9001, AR9002 */
+
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level: 0 1 2 3 4 5 6 7 */
+ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
/**
* ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
@@ -742,17 +765,6 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
return -EINVAL;
}
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- /* Enable ASYNC FIFO */
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
- REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
- REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- }
-
/*
* Set correct baseband to analog shift setting to
* access analog chips.
@@ -1037,8 +1049,9 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
return pll;
}
-static bool ar5008_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
+static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd,
+ int param)
{
struct ar5416AniState *aniState = ah->curani;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1220,6 +1233,265 @@ static bool ar5008_hw_ani_control(struct ath_hw *ah,
return true;
}
+static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd,
+ int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ s32 value, value2;
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ /*
+ * on == 1 means ofdm weak signal detection is ON
+ * on == 1 is the default, for less noise immunity
+ *
+ * on == 0 means ofdm weak signal detection is OFF
+ * on == 0 means more noise imm
+ * on == 0 means more noise immunity
+ u32 on = param ? 1 : 0;
+ /*
+ * make register setting for default
+ * (weak sig detect ON) come from INI file
+ */
+ int m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ int m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ int m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ int m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ int m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ int m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ int m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ int m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ int m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ int m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep_table)) {
+ ath_print(common, ATH_DBG_ANI,
+ "ATH9K_ANI_FIRSTEP_LEVEL: level "
+ "out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep_table));
+ return false;
+ }
+
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstep;
+ if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ value);
+ /*
+ * we need to set first step low register too
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstepLow;
+ if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+
+ if (level != aniState->firstepLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
+ ath_print(common, ATH_DBG_ANI,
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+ "out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1_table));
+ return false;
+ }
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1;
+ if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ value);
+
+ /*
+ * set AR_PHY_EXT_CCA for extension channel
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1Ext;
+ if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
+ break;
+ }
+ case ATH9K_ANI_MRC_CCK:
+ /*
+ * You should not see this, as AR5008, AR9001 and AR9002
+ * do not have hardware support for MRC CCK.
+ */
+ WARN_ON(1);
+ break;
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+ "MRCcck=%s listenTime=%d CC=%d listen=%d "
+ "ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->firstepLevel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->cycleCount,
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+ return true;
+}
+
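A worked example of the "relative to INI, then capped" computation used for both the FIRSTEP and SPUR_IMMUNITY cases above (the cached iniDef values are hypothetical): value = firstep_table[level] - firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] + iniDef.firstep, clamped to ATH9K_SIG_FIRSTEP_SETTING_MIN..MAX. With the table above, firstep_table[2] = 0 is the reference for default level 2, so requesting level 6 with a cached firstep of 8 gives 8 - 0 + 8 = 16, within the 0..20 cap, and level 0 gives -4 + 8 = 4; a cached value near the cap simply saturates, e.g. level 8 with iniDef.firstep = 15 gives 12 + 15 = 27, written back as 20.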
static void ar5008_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
@@ -1340,6 +1612,71 @@ static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
DISABLE_REGWRITE_BUFFER(ah);
}
+/*
+ * Initialize the ANI register values with default (ini) values.
+ * This routine is called during a (full) hardware reset after
+ * all the registers are initialised from the INI.
+ */
+static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath9k_ani_default *iniDef;
+ int index;
+ u32 val;
+
+ index = ath9k_hw_get_ani_channel_idx(ah, chan);
+ aniState = &ah->ani[index];
+ ah->curani = aniState;
+ iniDef = &aniState->iniDef;
+
+ ath_print(common, ATH_DBG_ANI,
+ "ver %d.%d opmode %u chan %d MHz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_TIMING5_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCKOff = true; /* not available on pre AR9003 */
+
+ aniState->cycleCount = 0;
+}
+
+
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1361,10 +1698,15 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
priv_ops->set_diversity = ar5008_set_diversity;
- priv_ops->ani_control = ar5008_hw_ani_control;
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->loadnf = ar5008_hw_loadnf;
+ if (modparam_force_new_ani) {
+ priv_ops->ani_control = ar5008_hw_ani_control_new;
+ priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
+ } else
+ priv_ops->ani_control = ar5008_hw_ani_control_old;
+
if (AR_SREV_9100(ah))
priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
else if (AR_SREV_9160_10_OR_LATER(ah))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a8a8cdc04afa..0317ac9fc1b7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -18,6 +18,11 @@
#include "ar5008_initvals.h"
#include "ar9001_initvals.h"
#include "ar9002_initvals.h"
+#include "ar9002_phy.h"
+
+int modparam_force_new_ani;
+module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
+MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
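Usage note (an assumption, not stated in this patch): ar9002_hw.c is normally built into the ath9k_hw module, so the new ANI could be forced on AR5008/AR9001/AR9002 at load time with something like "modprobe ath9k_hw force_new_ani=1"; AR9300 and later select the new implementation unconditionally via the AR_SREV_9300_20_OR_LATER() checks in ath9k_hw_ani_init().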
/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
@@ -436,55 +441,84 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
}
udelay(1000);
+ }
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ if (power_off) {
+ /* clear bit 19 to disable L1 */
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ val = REG_READ(ah, AR_WA);
+
+ /*
+ * Set PCIe workaround bits
+ * In AR9280 and AR9285, bit 14 in WA register (disable L1)
+ * should only be set when device enters D3 and be
+ * cleared when device comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ } else {
+ if (((AR_SREV_9285(ah) ||
+ AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ val |= AR_WA_D3_L1_DISABLE;
+ }
+ }
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+ /*
+ * Disable bit 6 and 7 before entering D3 to
+ * prevent system hang.
+ */
+ val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
+ }
- /* Several PCIe massages to ensure proper behaviour */
+ if (AR_SREV_9285E_20(ah))
+ val |= AR_WA_BIT23;
+
+ REG_WRITE(ah, AR_WA, val);
+ } else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
} else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ if (AR_SREV_9285(ah) ||
+ AR_SREV_9271(ah) ||
AR_SREV_9287(ah)) {
val = AR9285_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
- } else if (AR_SREV_9280(ah)) {
+ } else if (AR_SREV_9280(ah)) {
/*
- * On AR9280 chips bit 22 of 0x4004 needs to be
- * set otherwise card may disappear.
+ * For AR9280 chips, bit 22 of 0x4004
+ * needs to be set.
*/
val = AR9280_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
- } else
+ } else {
val = AR_WA_DEFAULT;
+ }
+ }
+
+ /* WAR for ASPM system hang */
+ if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+ val |= (AR_WA_BIT6 | AR_WA_BIT7);
}
+ if (AR_SREV_9285E_20(ah))
+ val |= AR_WA_BIT23;
+
REG_WRITE(ah, AR_WA, val);
- }
- if (power_off) {
- /*
- * Set PCIe workaround bits
- * bit 14 in WA register (disable L1) should only
- * be set when device enters D3 and be cleared
- * when device comes back to D0.
- */
- if (ah->config.pcie_waen) {
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- } else {
- if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- }
- }
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
}
}
@@ -536,18 +570,29 @@ int ar9002_hw_rf_claim(struct ath_hw *ah)
return 0;
}
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_13_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+}
+
/*
- * Enable ASYNC FIFO
- *
* If Async FIFO is enabled, the following counters change as MAC now runs
* at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
*
* The values below tested for ht40 2 chain.
* Overwrite the delay/timeouts initialized in process ini.
*/
-void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+void ar9002_hw_update_async_fifo(struct ath_hw *ah)
{
- if (AR_SREV_9287_12_OR_LATER(ah)) {
+ if (AR_SREV_9287_13_OR_LATER(ah)) {
REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
@@ -571,9 +616,9 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
*/
void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
{
- if (AR_SREV_9287_12_OR_LATER(ah)) {
+ if (AR_SREV_9287_13_OR_LATER(ah)) {
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
}
}
@@ -595,4 +640,9 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
ar9002_hw_attach_calib_ops(ah);
ar9002_hw_attach_mac_ops(ah);
+
+ if (modparam_force_new_ani)
+ ath9k_hw_attach_ani_ops_new(ah);
+ else
+ ath9k_hw_attach_ani_ops_old(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index dae7f3304eb8..8ab24ee8564b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -4492,7 +4492,7 @@ static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
};
-/* AR9271 initialization values automaticaly created: 06/04/09 */
+/* AR9271 initialization values automatically created: 03/31/10 */
static const u32 ar9271Modes_9271[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5011,7 +5011,7 @@ static const u32 ar9271Common_9271[][2] = {
{ 0x0000783c, 0x72ee0a72 },
{ 0x00007840, 0xbbfffffc },
{ 0x00007844, 0x000c0db6 },
- { 0x00007848, 0x6db61b6f },
+ { 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@@ -5218,7 +5218,7 @@ static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
{ 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
{ 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
- { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a214652, 0x0a214652, 0x0a22a652 },
{ 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
{ 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
{ 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 81bf6e5840e1..ce8bb001c6d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -114,6 +114,10 @@
#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_LOW 0x9840
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW 0x00000FC0L
+#define AR_PHY_FIND_SIG_FIRSTEP_LOW_S 6
+
#define AR_PHY_AGC_CTL1 0x985C
#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
@@ -325,6 +329,9 @@
#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_TIMING5_CYCPWR_THR1_S 9
+
#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
#define AR_PHY_EXT_MINCCA_PWR_S 23
#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
index db019dd220b7..d3375fc4ce8b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
@@ -14,8 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef INITVALS_9003_H
-#define INITVALS_9003_H
+#ifndef INITVALS_9003_2P0_H
+#define INITVALS_9003_2P0_H
/* AR9003 2.0 */
@@ -835,71 +835,71 @@ static const u32 ar9300_2p0_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -913,71 +913,71 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
- {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
- {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
- {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
- {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
- {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
- {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1781,4 +1781,4 @@ static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
{0x00004044, 0x00000000},
};
-#endif /* INITVALS_9003_H */
+#endif /* INITVALS_9003_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
new file mode 100644
index 000000000000..ec98ab50748a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -0,0 +1,1785 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_2P2_H
+#define INITVALS_9003_2P2_H
+
+/* AR9003 2.2 */
+
+static const u32 ar9300_2p2_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p2[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p2_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x00000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300Common_rx_gain_table_merlin_2p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9300_2p2_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p2_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9200_merlin_2p2_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9300_2p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e3c, 0xcf946222},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a22c, 0x01036a1e},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000246},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x06000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00000000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x00000000},
+ {0x0000a628, 0x00000000},
+ {0x0000a62c, 0x00000000},
+ {0x0000a630, 0x00000000},
+ {0x0000a634, 0x00000000},
+ {0x0000a638, 0x00000000},
+ {0x0000a63c, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00000637},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2dc, 0x00000000},
+ {0x0000b2e0, 0x00000000},
+ {0x0000b2e4, 0x00000000},
+ {0x0000b2e8, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2dc, 0x00000000},
+ {0x0000c2e0, 0x00000000},
+ {0x0000c2e4, 0x00000000},
+ {0x0000c2e8, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
+ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p2_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9bc00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p2_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000008},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9003_2P2_H */
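
Each initvals row above pairs a register address with the value(s) to program: two-column tables such as ar9300_2p2_mac_core apply in all modes, while five-column tables carry one value per channel mode. A minimal sketch of how a two-column table is flushed to the chip is shown below; the real driver does this in ar9003_hw_prog_ini() via the INI_RA() accessor, and the helper name here is hypothetical.

/*
 * Illustrative only: write out a two-column initvals table.
 * Column 0 is the register address, column 1 the "allmodes" value;
 * multi-column tables instead pick the value column by channel mode.
 */
static void example_write_ini_table(struct ath_hw *ah,
				    const u32 (*table)[2], unsigned int rows)
{
	unsigned int i;

	for (i = 0; i < rows; i++)
		REG_WRITE(ah, table[i][0], table[i][1]);
}

/* e.g. example_write_ini_table(ah, ar9300_2p2_mac_core,
 *				ARRAY_SIZE(ar9300_2p2_mac_core)); */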
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 56a9e5fa6d66..5a0650399136 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -739,6 +739,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
*/
ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+ /* Do Tx IQ Calibration */
+ ar9003_hw_tx_iq_cal(ah);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+ udelay(5);
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
REG_READ(ah, AR_PHY_AGC_CONTROL) |
@@ -753,10 +759,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
return false;
}
- /* Do Tx IQ Calibration */
- if (ah->config.tx_iq_calibration)
- ar9003_hw_tx_iq_cal(ah);
-
/* Revert chainmasks to their original values before NF cal */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 23eb60ea5455..343c9a427acb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -67,6 +67,7 @@ static const struct ar9300_eeprom ar9300_default = {
* bit2 - enable fastClock - enabled
* bit3 - enable doubling - enabled
* bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
*/
.miscConfiguration = 0, /* bit0 - turn down drivestrength */
.eepromWriteEnableGpio = 3,
@@ -129,9 +130,11 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .futureModal = { /* [32] */
+ .papdRateMaskHt20 = LE32(0x80c080),
+ .papdRateMaskHt40 = LE32(0x80c080),
+ .futureModal = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0
},
},
.calFreqPier2G = {
@@ -326,9 +329,11 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0xf0e0e0),
+ .papdRateMaskHt40 = LE32(0xf0e0e0),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0
},
},
.calFreqPier5G = {
@@ -644,6 +649,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return (pBase->featureEnable & 0x10) >> 4;
case EEP_SWREG:
return le32_to_cpu(pBase->swreg);
+ case EEP_PAPRD:
+ return !!(pBase->featureEnable & BIT(5));
default:
return 0;
}
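
The new EEP_PAPRD case simply reports featureEnable bit 5 from the EEPROM base header. A caller can query it through the usual ath9k EEPROM accessor; the helper below is only a sketch, and the ah->eep_ops->get_eeprom path is assumed here from existing ath9k convention rather than taken from this patch.

/* Sketch: check whether PA predistortion is enabled in the EEPROM. */
static bool example_paprd_supported(struct ath_hw *ah)
{
	/* returns !!(pBase->featureEnable & BIT(5)) per the case added above */
	return ah->eep_ops->get_eeprom(ah, EEP_PAPRD) != 0;
}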
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 23fb353c3bba..3c533bb983c7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -234,7 +234,9 @@ struct ar9300_modal_eep_header {
u8 txEndToRxOn;
u8 txFrameToXpaOn;
u8 thresh62;
- u8 futureModal[32];
+ __le32 papdRateMaskHt20;
+ __le32 papdRateMaskHt40;
+ u8 futureModal[24];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index b15309caf1da..82c3ab756cd0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -16,7 +16,8 @@
#include "hw.h"
#include "ar9003_mac.h"
-#include "ar9003_initvals.h"
+#include "ar9003_2p0_initvals.h"
+#include "ar9003_2p2_initvals.h"
/* General hardware code for the AR9003 hardware family */
@@ -31,12 +32,8 @@ static bool ar9003_hw_macversion_supported(u32 macversion)
return false;
}
-/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
-/*
- * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
- * ensuring it does not affect hardware bring up
- */
-static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+/* AR9003 2.0 */
+static void ar9003_2p0_hw_init_mode_regs(struct ath_hw *ah)
{
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -106,27 +103,128 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
3);
}
+/* AR9003 2.2 */
+static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah)
+{
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p2_mac_core,
+ ARRAY_SIZE(ar9300_2p2_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p2_mac_postamble,
+ ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p2_baseband_core,
+ ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p2_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p2_radio_core,
+ ARRAY_SIZE(ar9300_2p2_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p2_radio_postamble,
+ ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p2_soc_preamble,
+ ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p2_soc_postamble,
+ ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_clkreq_enable_L1_2p2,
+ ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p2,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
+ 3);
+}
+
+/*
+ * The AR9003 family uses a new INI format (pre, core, post
+ * arrays per subsystem).
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9300_20(ah))
+ ar9003_2p0_hw_init_mode_regs(ah);
+ else
+ ar9003_2p2_hw_init_mode_regs(ah);
+}
+
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
{
switch (ar9003_hw_get_tx_gain_idx(ah)) {
case 0:
default:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
- 5);
+ if (AR_SREV_9300_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+ 5);
break;
case 1:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
- 5);
+ if (AR_SREV_9300_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
+ 5);
break;
case 2:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_low_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
- 5);
+ if (AR_SREV_9300_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
+ 5);
break;
}
}
@@ -136,15 +234,28 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_rx_gain_idx(ah)) {
case 0:
default:
- INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
- 2);
+ if (AR_SREV_9300_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
+ 2);
break;
case 1:
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_wo_xlna_rx_gain_table_2p0,
- ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
- 2);
+ if (AR_SREV_9300_20(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
+ 2);
break;
}
}
@@ -202,4 +313,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
ar9003_hw_attach_phy_ops(ah);
ar9003_hw_attach_calib_ops(ah);
ar9003_hw_attach_mac_ops(ah);
+
+ ath9k_hw_attach_ani_ops_new(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 37ba37481a47..06ef71019c12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -90,6 +90,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
MAP_ISR_S2_CST);
mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
MAP_ISR_S2_TSFOOR);
+ mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
+ MAP_ISR_S2_BB_WATCHDOG);
if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
REG_WRITE(ah, AR_ISR_S2, isr2);
@@ -167,6 +169,9 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
(void) REG_READ(ah, AR_ISR);
}
+
+ if (*masked & ATH9K_INT_BB_WATCHDOG)
+ ar9003_hw_bb_watchdog_read(ah);
}
if (sync_cause) {
@@ -465,6 +470,14 @@ static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
ads->ctl11 &= ~AR_VirtMoreFrag;
}
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->ctl12 |= SM(chains, AR_PAPRDChainMask);
+}
+EXPORT_SYMBOL(ar9003_hw_set_paprd_txdesc);
+
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
struct ath_hw_ops *ops = ath9k_hw_ops(hw);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index f17558b14539..f76f27d16f77 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -40,6 +40,10 @@
#define AR_Not_Sounding 0x20000000
+/* ctl 12 */
+#define AR_PAPRDChainMask 0x00000e00
+#define AR_PAPRDChainMask_S 9
+
#define MAP_ISR_S2_CST 6
#define MAP_ISR_S2_GTT 6
#define MAP_ISR_S2_TIM 3
@@ -47,6 +51,7 @@
#define MAP_ISR_S2_DTIMSYNC 7
#define MAP_ISR_S2_DTIM 7
#define MAP_ISR_S2_TSFOOR 4
+#define MAP_ISR_S2_BB_WATCHDOG 6
#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
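
AR_PAPRDChainMask and AR_PAPRDChainMask_S follow the usual ath9k mask/shift pairing, so the SM() call in ar9003_hw_set_paprd_txdesc() places the chain mask into bits 9..11 of ctl12. Roughly, the expansion is equivalent to the illustrative helper below (not part of the patch):

static inline u32 example_paprd_chainmask_bits(u8 chains)
{
	/* shift into position, then confine to the 3-bit field at bit 9 */
	return ((u32)chains << AR_PAPRDChainMask_S) & AR_PAPRDChainMask;
}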
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
new file mode 100644
index 000000000000..49e0c865ce5c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+void ar9003_paprd_enable(struct ath_hw *ah, bool val)
+{
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+}
+EXPORT_SYMBOL(ar9003_paprd_enable);
+
+static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_modal_eep_header *hdr;
+ const u32 ctrl0[3] = {
+ AR_PHY_PAPRD_CTRL0_B0,
+ AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_B2
+ };
+ const u32 ctrl1[3] = {
+ AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_B2
+ };
+ u32 am_mask, ht40_mask;
+ int i;
+
+ if (ah->curchan && IS_CHAN_5GHZ(ah->curchan))
+ hdr = &eep->modalHeader5G;
+ else
+ hdr = &eep->modalHeader2G;
+
+ am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
+ ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask);
+
+ for (i = 0; i < 3; i++) {
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE, 1);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK, 181);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT, 361);
+ REG_RMW_FIELD(ah, ctrl1[i],
+ AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA, 0);
+ REG_RMW_FIELD(ah, ctrl0[i],
+ AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH, 3);
+ }
+
+ ar9003_paprd_enable(ah, false);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+ AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, 147);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
+ -15);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
+ 100);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 261376);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_1_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 248079);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_2_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 233759);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_3_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 220464);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_4_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 208194);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_5_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 196949);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_6_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
+ AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
+}
+
+static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
+{
+ u32 *entry = ah->paprd_gain_table_entries;
+ u8 *index = ah->paprd_gain_table_index;
+ u32 reg = AR_PHY_TXGAIN_TABLE;
+ int i;
+
+ memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
+ memset(index, 0, sizeof(ah->paprd_gain_table_index));
+
+ for (i = 0; i < 32; i++) {
+ entry[i] = REG_READ(ah, reg);
+ index[i] = (entry[i] >> 24) & 0xff;
+ reg += 4;
+ }
+}
+
+static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
+ int target_power)
+{
+ int olpc_gain_delta = 0;
+ int alpha_therm, alpha_volt;
+ int therm_cal_value, volt_cal_value;
+ int therm_value, volt_value;
+ int thermal_gain_corr, voltage_gain_corr;
+ int desired_scale, desired_gain = 0;
+ u32 reg;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+ desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
+ AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
+ alpha_therm = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM);
+ alpha_volt = REG_READ_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_VOLT);
+ therm_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_THERM_CAL_VALUE);
+ volt_cal_value = REG_READ_FIELD(ah, AR_PHY_TPC_18,
+ AR_PHY_TPC_18_VOLT_CAL_VALUE);
+ therm_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE);
+ volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
+ AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
+
+ if (chain == 0)
+ reg = AR_PHY_TPC_11_B0;
+ else if (chain == 1)
+ reg = AR_PHY_TPC_11_B1;
+ else
+ reg = AR_PHY_TPC_11_B2;
+
+ olpc_gain_delta = REG_READ_FIELD(ah, reg,
+ AR_PHY_TPC_11_OLPC_GAIN_DELTA);
+
+ if (olpc_gain_delta >= 128)
+ olpc_gain_delta = olpc_gain_delta - 256;
+
+ thermal_gain_corr = (alpha_therm * (therm_value - therm_cal_value) +
+ (256 / 2)) / 256;
+ voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
+ (128 / 2)) / 128;
+ desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
+ voltage_gain_corr + desired_scale;
+
+ return desired_gain;
+}
+
+static void ar9003_tx_force_gain(struct ath_hw *ah, unsigned int gain_index)
+{
+ int selected_gain_entry, txbb1dbgain, txbb6dbgain, txmxrgain;
+ int padrvgnA, padrvgnB, padrvgnC, padrvgnD;
+ u32 *gain_table_entries = ah->paprd_gain_table_entries;
+
+ selected_gain_entry = gain_table_entries[gain_index];
+ txbb1dbgain = selected_gain_entry & 0x7;
+ txbb6dbgain = (selected_gain_entry >> 3) & 0x3;
+ txmxrgain = (selected_gain_entry >> 5) & 0xf;
+ padrvgnA = (selected_gain_entry >> 9) & 0xf;
+ padrvgnB = (selected_gain_entry >> 13) & 0xf;
+ padrvgnC = (selected_gain_entry >> 17) & 0xf;
+ padrvgnD = (selected_gain_entry >> 21) & 0x3;
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN, txbb1dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN, txbb6dbgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN, txmxrgain);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA, padrvgnA);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB, padrvgnB);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC, padrvgnC);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND, padrvgnD);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCED_DAC_GAIN, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_1, AR_PHY_TPC_1_FORCE_DAC_GAIN, 0);
+}
+
+static inline int find_expn(int num)
+{
+ return fls(num) - 1;
+}
+
+static inline int find_proper_scale(int expn, int N)
+{
+ return (expn > N) ? expn - 10 : 0;
+}
+
+#define NUM_BIN 23
+
+static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
+{
+ unsigned int thresh_accum_cnt;
+ int x_est[NUM_BIN + 1], Y[NUM_BIN + 1], theta[NUM_BIN + 1];
+ int PA_in[NUM_BIN + 1];
+ int B1_tmp[NUM_BIN + 1], B2_tmp[NUM_BIN + 1];
+ unsigned int B1_abs_max, B2_abs_max;
+ int max_index, scale_factor;
+ int y_est[NUM_BIN + 1];
+ int x_est_fxp1_nonlin, x_tilde[NUM_BIN + 1];
+ unsigned int x_tilde_abs;
+ int G_fxp, Y_intercept, order_x_by_y, M, I, L, sum_y_sqr, sum_y_quad;
+ int Q_x, Q_B1, Q_B2, beta_raw, alpha_raw, scale_B;
+ int Q_scale_B, Q_beta, Q_alpha, alpha, beta, order_1, order_2;
+ int order1_5x, order2_3x, order1_5x_rem, order2_3x_rem;
+ int y5, y3, tmp;
+ int theta_low_bin = 0;
+ int i;
+
+ /* disregard any bin that contains <= 16 samples */
+ thresh_accum_cnt = 16;
+ scale_factor = 5;
+ max_index = 0;
+ memset(theta, 0, sizeof(theta));
+ memset(x_est, 0, sizeof(x_est));
+ memset(Y, 0, sizeof(Y));
+ memset(y_est, 0, sizeof(y_est));
+ memset(x_tilde, 0, sizeof(x_tilde));
+
+ for (i = 0; i < NUM_BIN; i++) {
+ s32 accum_cnt, accum_tx, accum_rx, accum_ang;
+
+ /* number of samples */
+ accum_cnt = data_L[i] & 0xffff;
+
+ if (accum_cnt <= thresh_accum_cnt)
+ continue;
+
+ /* sum(tx amplitude) */
+ accum_tx = ((data_L[i] >> 16) & 0xffff) |
+ ((data_U[i] & 0x7ff) << 16);
+
+ /* sum(rx amplitude distance to lower bin edge) */
+ accum_rx = ((data_U[i] >> 11) & 0x1f) |
+ ((data_L[i + 23] & 0xffff) << 5);
+
+ /* sum(angles) */
+ accum_ang = ((data_L[i + 23] >> 16) & 0xffff) |
+ ((data_U[i + 23] & 0x7ff) << 16);
+
+ accum_tx <<= scale_factor;
+ accum_rx <<= scale_factor;
+ x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor;
+
+ Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+ scale_factor) +
+ (1 << scale_factor) * max_index + 16;
+
+ if (accum_ang >= (1 << 26))
+ accum_ang -= 1 << 27;
+
+ theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+ accum_cnt;
+
+ max_index++;
+ }
+
+ /*

+ * Find the average theta of the first 5 bins and set all of them
+ * to that value; the curve is linear in that range.
+ */
+ for (i = 1; i < 6; i++)
+ theta_low_bin += theta[i];
+
+ theta_low_bin = theta_low_bin / 5;
+ for (i = 1; i < 6; i++)
+ theta[i] = theta_low_bin;
+
+ /* Set values at origin */
+ theta[0] = theta_low_bin;
+ for (i = 0; i <= max_index; i++)
+ theta[i] -= theta_low_bin;
+
+ x_est[0] = 0;
+ Y[0] = 0;
+ scale_factor = 8;
+
+ /* low signal gain */
+ if (x_est[6] == x_est[3])
+ return false;
+
+ G_fxp =
+ (((Y[6] - Y[3]) * 1 << scale_factor) +
+ (x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
+
+ Y_intercept =
+ (G_fxp * (x_est[0] - x_est[3]) +
+ (1 << scale_factor)) / (1 << scale_factor) + Y[3];
+
+ for (i = 0; i <= max_index; i++)
+ y_est[i] = Y[i] - Y_intercept;
+
+ for (i = 0; i <= 3; i++) {
+ y_est[i] = i * 32;
+
+ /* prevent division by zero */
+ if (G_fxp == 0)
+ return false;
+
+ x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
+ }
+
+ x_est_fxp1_nonlin =
+ x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
+ G_fxp) / G_fxp;
+
+ order_x_by_y =
+ (x_est_fxp1_nonlin + y_est[max_index]) / y_est[max_index];
+
+ if (order_x_by_y == 0)
+ M = 10;
+ else if (order_x_by_y == 1)
+ M = 9;
+ else
+ M = 8;
+
+ I = (max_index > 15) ? 7 : max_index >> 1;
+ L = max_index - I;
+ scale_factor = 8;
+ sum_y_sqr = 0;
+ sum_y_quad = 0;
+ x_tilde_abs = 0;
+
+ for (i = 0; i <= L; i++) {
+ unsigned int y_sqr;
+ unsigned int y_quad;
+ unsigned int tmp_abs;
+
+ /* prevent division by zero */
+ if (y_est[i + I] == 0)
+ return false;
+
+ x_est_fxp1_nonlin =
+ x_est[i + I] - ((1 << scale_factor) * y_est[i + I] +
+ G_fxp) / G_fxp;
+
+ x_tilde[i] =
+ (x_est_fxp1_nonlin * (1 << M) + y_est[i + I]) / y_est[i +
+ I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ x_tilde[i] =
+ (x_tilde[i] * (1 << M) + y_est[i + I]) / y_est[i + I];
+ y_sqr =
+ (y_est[i + I] * y_est[i + I] +
+ (scale_factor * scale_factor)) / (scale_factor *
+ scale_factor);
+ tmp_abs = abs(x_tilde[i]);
+ if (tmp_abs > x_tilde_abs)
+ x_tilde_abs = tmp_abs;
+
+ y_quad = y_sqr * y_sqr;
+ sum_y_sqr = sum_y_sqr + y_sqr;
+ sum_y_quad = sum_y_quad + y_quad;
+ B1_tmp[i] = y_sqr * (L + 1);
+ B2_tmp[i] = y_sqr;
+ }
+
+ B1_abs_max = 0;
+ B2_abs_max = 0;
+ for (i = 0; i <= L; i++) {
+ int abs_val;
+
+ B1_tmp[i] -= sum_y_sqr;
+ B2_tmp[i] = sum_y_quad - sum_y_sqr * B2_tmp[i];
+
+ abs_val = abs(B1_tmp[i]);
+ if (abs_val > B1_abs_max)
+ B1_abs_max = abs_val;
+
+ abs_val = abs(B2_tmp[i]);
+ if (abs_val > B2_abs_max)
+ B2_abs_max = abs_val;
+ }
+
+ Q_x = find_proper_scale(find_expn(x_tilde_abs), 10);
+ Q_B1 = find_proper_scale(find_expn(B1_abs_max), 10);
+ Q_B2 = find_proper_scale(find_expn(B2_abs_max), 10);
+
+ beta_raw = 0;
+ alpha_raw = 0;
+ for (i = 0; i <= L; i++) {
+ x_tilde[i] = x_tilde[i] / (1 << Q_x);
+ B1_tmp[i] = B1_tmp[i] / (1 << Q_B1);
+ B2_tmp[i] = B2_tmp[i] / (1 << Q_B2);
+ beta_raw = beta_raw + B1_tmp[i] * x_tilde[i];
+ alpha_raw = alpha_raw + B2_tmp[i] * x_tilde[i];
+ }
+
+ scale_B =
+ ((sum_y_quad / scale_factor) * (L + 1) -
+ (sum_y_sqr / scale_factor) * sum_y_sqr) * scale_factor;
+
+ Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
+ scale_B = scale_B / (1 << Q_scale_B);
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ tmp = i * 32;
+ y5 = ((beta * tmp) >> 6) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = (y5 * tmp) >> order1_5x;
+ y5 = y5 >> order1_5x_rem;
+ y3 = (alpha * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = (y3 * tmp) >> order2_3x;
+ y3 = y3 >> order2_3x_rem;
+ PA_in[i] = y5 + y3 + (256 * tmp) / G_fxp;
+
+ if (i >= 2) {
+ tmp = PA_in[i] - PA_in[i - 1];
+ if (tmp < 0)
+ PA_in[i] =
+ PA_in[i - 1] + (PA_in[i - 1] -
+ PA_in[i - 2]);
+ }
+
+ PA_in[i] = (PA_in[i] < 1400) ? PA_in[i] : 1400;
+ }
+
+ beta_raw = 0;
+ alpha_raw = 0;
+
+ for (i = 0; i <= L; i++) {
+ int theta_tilde =
+ ((theta[i + I] << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ theta_tilde =
+ ((theta_tilde << M) + y_est[i + I]) / y_est[i + I];
+ beta_raw = beta_raw + B1_tmp[i] * theta_tilde;
+ alpha_raw = alpha_raw + B2_tmp[i] * theta_tilde;
+ }
+
+ Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
+ Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
+ beta_raw = beta_raw / (1 << Q_beta);
+ alpha_raw = alpha_raw / (1 << Q_alpha);
+
+ alpha = (alpha_raw << 10) / scale_B;
+ beta = (beta_raw << 10) / scale_B;
+ order_1 = 3 * M - Q_x - Q_B1 - Q_beta + 10 + Q_scale_B + 5;
+ order_2 = 3 * M - Q_x - Q_B2 - Q_alpha + 10 + Q_scale_B + 5;
+ order1_5x = order_1 / 5;
+ order2_3x = order_2 / 3;
+ order1_5x_rem = order_1 - 5 * order1_5x;
+ order2_3x_rem = order_2 - 3 * order2_3x;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ int PA_angle;
+
+ /* pa_table[4] is calculated from PA_angle for i=5 */
+ if (i == 4)
+ continue;
+
+ tmp = i * 32;
+ if (beta > 0)
+ y5 = (((beta * tmp - 64) >> 6) -
+ (1 << order1_5x)) / (1 << order1_5x);
+ else
+ y5 = ((((beta * tmp - 64) >> 6) +
+ (1 << order1_5x)) / (1 << order1_5x));
+
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = (y5 * tmp) / (1 << order1_5x);
+ y5 = y5 / (1 << order1_5x_rem);
+
+ if (beta > 0)
+ y3 = (alpha * tmp -
+ (1 << order2_3x)) / (1 << order2_3x);
+ else
+ y3 = (alpha * tmp +
+ (1 << order2_3x)) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = (y3 * tmp) / (1 << order2_3x);
+ y3 = y3 / (1 << order2_3x_rem);
+
+ if (i < 4) {
+ PA_angle = 0;
+ } else {
+ PA_angle = y5 + y3;
+ if (PA_angle < -150)
+ PA_angle = -150;
+ else if (PA_angle > 150)
+ PA_angle = 150;
+ }
+
+ pa_table[i] = ((PA_in[i] & 0x7ff) << 11) + (PA_angle & 0x7ff);
+ if (i == 5) {
+ PA_angle = (PA_angle + 2) >> 1;
+ pa_table[i - 1] = ((PA_in[i - 1] & 0x7ff) << 11) +
+ (PA_angle & 0x7ff);
+ }
+ }
+
+ *gain = G_fxp;
+ return true;
+}
+
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_channel *chan, int chain)
+{
+ u32 *paprd_table_val = chan->pa_table[chain];
+ u32 small_signal_gain = chan->small_signal_gain[chain];
+ u32 training_power;
+ u32 reg = 0;
+ int i;
+
+ training_power =
+ REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+ training_power -= 4;
+
+ if (chain == 0)
+ reg = AR_PHY_PAPRD_MEM_TAB_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PAPRD_MEM_TAB_B1;
+ else if (chain == 2)
+ reg = AR_PHY_PAPRD_MEM_TAB_B2;
+
+ for (i = 0; i < PAPRD_TABLE_SZ; i++) {
+ REG_WRITE(ah, reg, paprd_table_val[i]);
+ reg = reg + 4;
+ }
+
+ if (chain == 0)
+ reg = AR_PHY_PA_GAIN123_B0;
+ else if (chain == 1)
+ reg = AR_PHY_PA_GAIN123_B1;
+ else
+ reg = AR_PHY_PA_GAIN123_B2;
+
+ REG_RMW_FIELD(ah, reg, AR_PHY_PA_GAIN123_PA_GAIN1, small_signal_gain);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B0,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
+}
+EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
+
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
+{
+
+ unsigned int i, desired_gain, gain_index;
+ unsigned int train_power;
+
+ train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+ train_power = train_power - 4;
+
+ desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
+
+ gain_index = 0;
+ for (i = 0; i < 32; i++) {
+ if (ah->paprd_gain_table_index[i] >= desired_gain)
+ break;
+ gain_index++;
+ }
+
+ ar9003_tx_force_gain(ah, gain_index);
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
+
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+ int chain)
+{
+ u16 *small_signal_gain = &chan->small_signal_gain[chain];
+ u32 *pa_table = chan->pa_table[chain];
+ u32 *data_L, *data_U;
+ int i, status = 0;
+ u32 *buf;
+ u32 reg;
+
+ memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
+
+ buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ data_L = &buf[0];
+ data_U = &buf[48];
+
+ REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ reg = AR_PHY_CHAN_INFO_TAB_0;
+ for (i = 0; i < 48; i++)
+ data_L[i] = REG_READ(ah, reg + (i << 2));
+
+ REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
+
+ for (i = 0; i < 48; i++)
+ data_U[i] = REG_READ(ah, reg + (i << 2));
+
+ if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
+ status = -2;
+
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ kfree(buf);
+
+ return status;
+}
+EXPORT_SYMBOL(ar9003_paprd_create_curve);
+
+int ar9003_paprd_init_table(struct ath_hw *ah)
+{
+ ar9003_paprd_setup_single_table(ah);
+ ar9003_paprd_get_gain_table(ah);
+ return 0;
+}
+EXPORT_SYMBOL(ar9003_paprd_init_table);
+
+bool ar9003_paprd_is_done(struct ath_hw *ah)
+{
+ return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+}
+EXPORT_SYMBOL(ar9003_paprd_is_done);
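
The exported PAPRD helpers above are meant to be driven from the rest of the driver: ar9003_paprd_init_table() once after reset, then per chain a gain-table setup, a training frame tagged via ar9003_hw_set_paprd_txdesc(), curve creation and table population, and finally ar9003_paprd_enable(). The sketch below shows one plausible per-chain sequence; the training-frame helper is hypothetical and stands in for driver code outside this patch.

/* Illustrative per-chain PAPRD calibration flow (not from this patch). */
static int example_paprd_run_chain(struct ath_hw *ah,
				   struct ath9k_channel *chan, int chain)
{
	int ret;

	ar9003_paprd_setup_gain_table(ah, chain);

	/* transmit a training frame tagged with ar9003_hw_set_paprd_txdesc() */
	example_paprd_send_training_frame(ah, chain);	/* hypothetical */

	if (!ar9003_paprd_is_done(ah))
		return -EINPROGRESS;

	ret = ar9003_paprd_create_curve(ah, chan, chain);
	if (ret)
		return ret;

	ar9003_paprd_populate_single_table(ah, chan, chain);
	return 0;
}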
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 80431a2f6dc1..19bc05c41136 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,28 @@
#include "hw.h"
#include "ar9003_phy.h"
+static const int firstep_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
+
+static const int cycpwrThr1_table[] =
+/* level: 0 1 2 3 4 5 6 7 8 */
+ { -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
+
+/*
+ * register values to turn OFDM weak signal detection OFF
+ */
+static const int m1ThreshLow_off = 127;
+static const int m2ThreshLow_off = 127;
+static const int m1Thresh_off = 127;
+static const int m2Thresh_off = 127;
+static const int m2CountThr_off = 31;
+static const int m2CountThrLow_off = 63;
+static const int m1ThreshLowExt_off = 127;
+static const int m2ThreshLowExt_off = 127;
+static const int m1ThreshExt_off = 127;
+static const int m2ThreshExt_off = 127;
+
/**
* ar9003_hw_set_channel - set channel on single-chip device
* @ah: atheros hardware structure
@@ -94,7 +116,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
}
/**
- * ar9003_hw_spur_mitigate - convert baseband spur frequency
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
* @ah: atheros hardware structure
* @chan:
*
@@ -521,15 +543,6 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
u32 val = INI_RA(iniArr, i, column);
REG_WRITE(ah, reg, val);
-
- /*
- * Determine if this is a shift register value, and insert the
- * configured delay if so.
- */
- if (reg >= 0x16000 && reg < 0x17000
- && ah->config.analog_shiftreg)
- udelay(100);
-
DO_DELAY(regWrites);
}
}
@@ -732,71 +745,68 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
{
struct ar5416AniState *aniState = ah->curani;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ s32 value, value2;
switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC,
- AR_PHY_AGC_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC,
- AR_PHY_AGC_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
+ /*
+ * on == 1 means ofdm weak signal detection is ON
+ * on == 1 is the default, for less noise immunity
+ *
+ * on == 0 means ofdm weak signal detection is OFF
+ * on == 0 means more noise immunity
+ */
u32 on = param ? 1 : 0;
+ /*
+ * make register setting for default
+ * (weak sig detect ON) come from INI file
+ */
+ int m1ThreshLow = on ?
+ aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+ int m2ThreshLow = on ?
+ aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+ int m1Thresh = on ?
+ aniState->iniDef.m1Thresh : m1Thresh_off;
+ int m2Thresh = on ?
+ aniState->iniDef.m2Thresh : m2Thresh_off;
+ int m2CountThr = on ?
+ aniState->iniDef.m2CountThr : m2CountThr_off;
+ int m2CountThrLow = on ?
+ aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+ int m1ThreshLowExt = on ?
+ aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+ int m2ThreshLowExt = on ?
+ aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+ int m1ThreshExt = on ?
+ aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+ int m2ThreshExt = on ?
+ aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
+ m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
+ m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
+ m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
@@ -806,6 +816,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
@@ -814,64 +830,167 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
}
break;
}
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
u32 level = param;
- if (level >= ARRAY_SIZE(firstep)) {
+ if (level >= ARRAY_SIZE(firstep_table)) {
ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
+ "ATH9K_ANI_FIRSTEP_LEVEL: level "
+ "out of range (%u > %u)\n",
level,
- (unsigned) ARRAY_SIZE(firstep));
+ (unsigned) ARRAY_SIZE(firstep_table));
return false;
}
+
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstep;
+ if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
+ value);
+ /*
+ * we need to set first step low register too
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = firstep_table[level] -
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ aniState->iniDef.firstepLow;
+ if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
+ if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
+ value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
+
+ if (level != aniState->firstepLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ }
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32 level = param;
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
+ "out of range (%u > %u)\n",
level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
+ (unsigned) ARRAY_SIZE(cycpwrThr1_table));
return false;
}
+ /*
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1;
+ if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
+ value);
+
+ /*
+ * set AR_PHY_EXT_CCA for extension channel
+ * make register setting relative to default
+ * from INI file & cap value
+ */
+ value2 = cycpwrThr1_table[level] -
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ aniState->iniDef.cycpwrThr1Ext;
+ if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
+ if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
+ value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+ REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1, value2);
+
+ if (level != aniState->spurImmunityLevel) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] "
+ "cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ }
break;
}
+ case ATH9K_ANI_MRC_CCK:{
+ /*
+ * is_on == 1 means MRC CCK is ON (default, less noise immunity)
+ * is_on == 0 means MRC CCK is OFF (more noise immunity)
+ */
+ bool is_on = param ? 1 : 0;
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_ENABLE, is_on);
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_MUX_REG, is_on);
+ if (!is_on != aniState->mrcCCKOff) {
+ ath_print(common, ATH_DBG_ANI,
+ "** ch %d: MRC CCK: %s=>%s\n",
+ chan->channel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ is_on ? "on" : "off");
+ if (is_on)
+ ah->stats.ast_ani_ccklow++;
+ else
+ ah->stats.ast_ani_cckhigh++;
+ aniState->mrcCCKOff = !is_on;
+ }
+ break;
+ }
case ATH9K_ANI_PRESENT:
break;
default:
@@ -880,25 +999,19 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
return false;
}
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
+ "MRCcck=%s listenTime=%d CC=%d listen=%d "
+ "ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->cycleCount,
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
return true;
}
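The spur-immunity case above does not program the requested level directly: it takes the table delta between the requested level and the default level, applies it on top of the INI-programmed baseline, and clamps the result. A standalone sketch of that arithmetic follows; the table contents, default level and clamp bounds are placeholders for illustration only (the real cycpwrThr1_table and ATH9K_* constants live elsewhere in the driver).

#include <stdio.h>

/* Placeholder stand-ins; the real values come from the ath9k headers. */
#define SPUR_IMM_MIN		0	/* stand-in for ATH9K_SIG_SPUR_IMM_SETTING_MIN */
#define SPUR_IMM_MAX		127	/* stand-in for ATH9K_SIG_SPUR_IMM_SETTING_MAX */
#define SPUR_IMM_LVL_DEFAULT	3	/* stand-in for ATH9K_ANI_SPUR_IMMUNE_LVL_NEW */

static const int cycpwrThr1_example[] = { 2, 4, 6, 8, 10, 12, 14, 16 };

/* "relative to INI default, then cap", as done for AR_PHY_TIMING5_CYCPWR_THR1 */
static int spur_imm_value(int level, int ini_default)
{
	int value = cycpwrThr1_example[level] -
		    cycpwrThr1_example[SPUR_IMM_LVL_DEFAULT] +
		    ini_default;

	if (value < SPUR_IMM_MIN)
		value = SPUR_IMM_MIN;
	if (value > SPUR_IMM_MAX)
		value = SPUR_IMM_MAX;
	return value;
}

int main(void)
{
	/* INI default 8, requested level 5: 12 - 8 + 8 = 12 */
	printf("%d\n", spur_imm_value(5, 8));
	return 0;
}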
@@ -1111,6 +1224,70 @@ static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
}
+/*
+ * Cache the INI-programmed ANI register values as the per-channel
+ * defaults. This routine is called during a (full) hardware reset,
+ * after all the registers have been initialised from the INI.
+ */
+static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ struct ar5416AniState *aniState;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_channel *chan = ah->curchan;
+ struct ath9k_ani_default *iniDef;
+ int index;
+ u32 val;
+
+ index = ath9k_hw_get_ani_channel_idx(ah, chan);
+ aniState = &ah->ani[index];
+ ah->curani = aniState;
+ iniDef = &aniState->iniDef;
+
+ ath_print(common, ATH_DBG_ANI,
+ "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
+
+ val = REG_READ(ah, AR_PHY_SFCORR);
+ iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
+ iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
+ iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_LOW);
+ iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
+ iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
+ iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
+
+ val = REG_READ(ah, AR_PHY_SFCORR_EXT);
+ iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
+ iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
+ iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
+ iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
+ iniDef->firstep = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP);
+ iniDef->firstepLow = REG_READ_FIELD(ah,
+ AR_PHY_FIND_SIG_LOW,
+ AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
+ iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
+ AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1);
+ iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CYCPWR_THR1);
+
+ /* these levels just got reset to defaults by the INI */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
+
+ aniState->cycleCount = 0;
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1131,4 +1308,124 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->ani_control = ar9003_hw_ani_control;
priv_ops->do_getnf = ar9003_hw_do_getnf;
priv_ops->loadnf = ar9003_hw_loadnf;
+ priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
+}
+
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
+ u32 val, idle_count;
+
+ if (!idle_tmo_ms) {
+ /* disable IRQ, disable chip-reset for BB panic */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
+ ~(AR_PHY_WATCHDOG_RST_ENABLE |
+ AR_PHY_WATCHDOG_IRQ_ENABLE));
+
+ /* disable watchdog in non-IDLE mode, disable in IDLE mode */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
+ ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+ AR_PHY_WATCHDOG_IDLE_ENABLE));
+
+ ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+ return;
+ }
+
+ /* enable IRQ, disable chip-reset for BB watchdog */
+ val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
+ (val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
+ ~AR_PHY_WATCHDOG_RST_ENABLE);
+
+ /* bound limit to 10 secs */
+ if (idle_tmo_ms > 10000)
+ idle_tmo_ms = 10000;
+
+ /*
+ * The watchdog event time unit is 2^15 cycles of the 44/88 MHz
+ * baseband clock.
+ *
+ * For HT20 that is 2^15 / 44 MHz = ~0.74 ms per tick.
+ * For HT40 that is 2^15 / 88 MHz = ~0.37 ms per tick.
+ *
+ * Since the fast clock is now also used in 5 GHz, these time
+ * units are common to both 2 GHz and 5 GHz.
+ */
+ idle_count = (100 * idle_tmo_ms) / 74;
+ if (ah->curchan && IS_CHAN_HT40(ah->curchan))
+ idle_count = (100 * idle_tmo_ms) / 37;
+
+ /*
+ * enable watchdog in non-IDLE mode, disable in IDLE mode,
+ * set idle time-out.
+ */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
+ AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
+ AR_PHY_WATCHDOG_IDLE_MASK |
+ (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
+
+ ath_print(common, ATH_DBG_RESET,
+ "Enabled BB Watchdog timeout (%u ms)\n",
+ idle_tmo_ms);
+}
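To make the divisors above concrete: one watchdog tick is 2^15 baseband clock cycles, i.e. 32768 / 44 MHz = ~0.74 ms in HT20 and 32768 / 88 MHz = ~0.37 ms in HT40, hence the (100 * ms) / 74 and / 37 scaling. A minimal restatement of the conversion, with no register access:

#include <stdio.h>
#include <stdbool.h>

/* ms -> BB watchdog ticks, mirroring ar9003_hw_bb_watchdog_config() above */
static unsigned int wd_idle_count(unsigned int tmo_ms, bool ht40)
{
	if (tmo_ms > 10000)		/* same 10 second cap as the driver */
		tmo_ms = 10000;
	/* one tick is ~0.74 ms (HT20) or ~0.37 ms (HT40) */
	return ht40 ? (100 * tmo_ms) / 37 : (100 * tmo_ms) / 74;
}

int main(void)
{
	/* 1000 ms -> 1351 ticks (HT20), 2702 ticks (HT40) */
	printf("%u %u\n", wd_idle_count(1000, false), wd_idle_count(1000, true));
	return 0;
}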
+
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
+{
+ /*
+ * we want to avoid printing in ISR context so we save the
+ * watchdog status to be printed later in bottom half context.
+ */
+ ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);
+
+ /*
+ * the watchdog timer should reset on status read, but to be
+ * sure we write 0 to the watchdog status bit.
+ */
+ REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
+ ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
+}
+
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 rxc_pcnt = 0, rxf_pcnt = 0, txf_pcnt = 0, status;
+
+ if (likely(!(common->debug_mask & ATH_DBG_RESET)))
+ return;
+
+ status = ah->bb_watchdog_last_status;
+ ath_print(common, ATH_DBG_RESET,
+ "\n==== BB update: BB status=0x%08x ====\n", status);
+ ath_print(common, ATH_DBG_RESET,
+ "** BB state: wd=%u det=%u rdar=%u rOFDM=%d "
+ "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
+ MS(status, AR_PHY_WATCHDOG_INFO),
+ MS(status, AR_PHY_WATCHDOG_DET_HANG),
+ MS(status, AR_PHY_WATCHDOG_RADAR_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_AGC_SM),
+ MS(status, AR_PHY_WATCHDOG_SRCH_SM));
+
+ ath_print(common, ATH_DBG_RESET,
+ "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
+ ath_print(common, ATH_DBG_RESET,
+ "** BB mode: BB_gen_controls=0x%08x **\n",
+ REG_READ(ah, AR_PHY_GEN_CTRL));
+
+ if (ath9k_hw_GetMibCycleCountsPct(ah, &rxc_pcnt, &rxf_pcnt, &txf_pcnt))
+ ath_print(common, ATH_DBG_RESET,
+ "** BB busy times: rx_clear=%d%%, "
+ "rx_frame=%d%%, tx_frame=%d%% **\n",
+ rxc_pcnt, rxf_pcnt, txf_pcnt);
+
+ ath_print(common, ATH_DBG_RESET,
+ "==== BB update: done ====\n\n");
}
+EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index f08cc8bda005..3394dfe52b42 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -149,6 +149,8 @@
#define AR_PHY_EXT_CCA_THRESH62_S 16
#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
+#define AR_PHY_EXT_CYCPWR_THR1_S 9
#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
@@ -283,6 +285,12 @@
#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+#define AR_PHY_MRC_CCK_CTRL (AR_AGC_BASE + 0x1d0)
+#define AR_PHY_MRC_CCK_ENABLE 0x00000001
+#define AR_PHY_MRC_CCK_ENABLE_S 0
+#define AR_PHY_MRC_CCK_MUX_REG 0x00000002
+#define AR_PHY_MRC_CCK_MUX_REG_S 1
+
#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
@@ -451,7 +459,11 @@
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
-#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
+
#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
@@ -467,30 +479,86 @@
#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
-#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
-#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
-#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
-#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
-#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
-#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_1 (AR_SM_BASE + 0x1f8)
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPC_1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TPC_1_FORCE_DAC_GAIN_S 0
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA 0x00ff0000
+#define AR_PHY_TPC_11_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_12 (AR_SM_BASE + 0x224)
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5 0x3e000000
+#define AR_PHY_TPC_12_DESIRED_SCALE_HT40_5_S 25
+
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0x000000ff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE 0x0000ff00
+#define AR_PHY_TPC_18_VOLT_CAL_VALUE_S 8
+
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+#define AR_PHY_TPC_19_ALPHA_VOLT 0x001f0000
+#define AR_PHY_TPC_19_ALPHA_VOLT_S 16
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN 0x00000001
+#define AR_PHY_TX_FORCED_GAIN_FORCE_TX_GAIN_S 0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB1DBGAIN_S 1
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN 0x00000030
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXBB6DBGAIN_S 4
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TX_FORCED_GAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA 0x00003c00
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNA_S 10
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB 0x0003c000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNB_S 14
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC 0x003c0000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGNC_S 18
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND 0x00c00000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_PADRVGND_S 22
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL 0x01000000
+#define AR_PHY_TX_FORCED_GAIN_FORCED_ENABLE_PAL_S 24
-#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
+
#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
-#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
-#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
-#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
-#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_WATCHDOG_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_WATCHDOG_CTL (AR_SM_BASE + 0x5cc)
#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
-#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
+#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
+#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
+
#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
@@ -660,17 +728,9 @@
#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
-#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
-#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
-#define AR_PHY_TPC_19_ALPHA_THERM 0xff
-#define AR_PHY_TPC_19_ALPHA_THERM_S 0
-
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
-#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
-
/*
* Channel 1 Register Map
*/
@@ -812,35 +872,173 @@
#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
-#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
-#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
-#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
-#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
-
-#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
-#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
-#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
-
-#define AR_PHY_BB_WD_STATUS 0x00000007
-#define AR_PHY_BB_WD_STATUS_S 0
-#define AR_PHY_BB_WD_DET_HANG 0x00000008
-#define AR_PHY_BB_WD_DET_HANG_S 3
-#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
-#define AR_PHY_BB_WD_RADAR_SM_S 4
-#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
-#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
-#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
-#define AR_PHY_BB_WD_RX_CCK_SM_S 12
-#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
-#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
-#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
-#define AR_PHY_BB_WD_TX_CCK_SM_S 20
-#define AR_PHY_BB_WD_AGC_SM 0x0F000000
-#define AR_PHY_BB_WD_AGC_SM_S 24
-#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
-#define AR_PHY_BB_WD_SRCH_SM_S 28
-
-#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
+#define AR_PHY_WATCHDOG_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_WATCHDOG_IDLE_ENABLE 0x00000002
+#define AR_PHY_WATCHDOG_IDLE_MASK 0xFFFF0000
+#define AR_PHY_WATCHDOG_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_WATCHDOG_RST_ENABLE 0x00000002
+#define AR_PHY_WATCHDOG_IRQ_ENABLE 0x00000004
+#define AR_PHY_WATCHDOG_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_WATCHDOG_INFO 0x00000007
+#define AR_PHY_WATCHDOG_INFO_S 0
+#define AR_PHY_WATCHDOG_DET_HANG 0x00000008
+#define AR_PHY_WATCHDOG_DET_HANG_S 3
+#define AR_PHY_WATCHDOG_RADAR_SM 0x000000F0
+#define AR_PHY_WATCHDOG_RADAR_SM_S 4
+#define AR_PHY_WATCHDOG_RX_OFDM_SM 0x00000F00
+#define AR_PHY_WATCHDOG_RX_OFDM_SM_S 8
+#define AR_PHY_WATCHDOG_RX_CCK_SM 0x0000F000
+#define AR_PHY_WATCHDOG_RX_CCK_SM_S 12
+#define AR_PHY_WATCHDOG_TX_OFDM_SM 0x000F0000
+#define AR_PHY_WATCHDOG_TX_OFDM_SM_S 16
+#define AR_PHY_WATCHDOG_TX_CCK_SM 0x00F00000
+#define AR_PHY_WATCHDOG_TX_CCK_SM_S 20
+#define AR_PHY_WATCHDOG_AGC_SM 0x0F000000
+#define AR_PHY_WATCHDOG_AGC_SM_S 24
+#define AR_PHY_WATCHDOG_SRCH_SM 0xF0000000
+#define AR_PHY_WATCHDOG_SRCH_SM_S 28
+
+#define AR_PHY_WATCHDOG_STATUS_CLR 0x00000008
+
+/*
+ * PAPRD registers
+ */
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+
+#define AR_PHY_PAPRD_AM2AM (AR_CHAN_BASE + 0xe4)
+#define AR_PHY_PAPRD_AM2AM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2AM_MASK_S 0
+
+#define AR_PHY_PAPRD_AM2PM (AR_CHAN_BASE + 0xe8)
+#define AR_PHY_PAPRD_AM2PM_MASK 0x01ffffff
+#define AR_PHY_PAPRD_AM2PM_MASK_S 0
+
+#define AR_PHY_PAPRD_HT40 (AR_CHAN_BASE + 0xec)
+#define AR_PHY_PAPRD_HT40_MASK 0x01ffffff
+#define AR_PHY_PAPRD_HT40_MASK_S 0
+
+#define AR_PHY_PAPRD_CTRL0_B0 (AR_CHAN_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B1 (AR_CHAN1_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_B2 (AR_CHAN2_BASE + 0xf0)
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE 0x00000001
+#define AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE_S 0
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK 0x00000002
+#define AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK_S 1
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH 0xf8000000
+#define AR_PHY_PAPRD_CTRL0_PAPRD_MAG_THRSH_S 27
+
+#define AR_PHY_PAPRD_CTRL1_B0 (AR_CHAN_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B1 (AR_CHAN1_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_B2 (AR_CHAN2_BASE + 0xf4)
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA 0x00000001
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_SCALING_ENA_S 0
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE 0x00000002
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2AM_ENABLE_S 1
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE 0x00000004
+#define AR_PHY_PAPRD_CTRL1_ADAPTIVE_AM2PM_ENABLE_S 2
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL 0x000001f8
+#define AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL_S 3
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK 0x0001fe00
+#define AR_PHY_PAPRD_CTRL1_PA_GAIN_SCALE_FACT_MASK_S 9
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
+#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
+
+#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490)
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING_S 1
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE 0x00000100
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE_S 8
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE 0x00000200
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE_S 9
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE 0x00000400
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE_S 10
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE 0x00000800
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE_S 11
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
+#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
+
+#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494)
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
+#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
+
+#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498)
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP_S 6
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL 0x0001f000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES 0x000e0000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES_S 17
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN 0x00f00000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN_S 20
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN 0x0f000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN_S 24
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
+#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
+
+#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c)
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA_S 12
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR 0x00000fff
+#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR_S 0
+
+#define AR_PHY_PAPRD_PRE_POST_SCALE_0_B0 (AR_CHAN_BASE + 0x100)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_1_B0 (AR_CHAN_BASE + 0x104)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_2_B0 (AR_CHAN_BASE + 0x108)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_3_B0 (AR_CHAN_BASE + 0x10c)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_4_B0 (AR_CHAN_BASE + 0x110)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_5_B0 (AR_CHAN_BASE + 0x114)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_6_B0 (AR_CHAN_BASE + 0x118)
+#define AR_PHY_PAPRD_PRE_POST_SCALE_7_B0 (AR_CHAN_BASE + 0x11c)
+#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
+#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
+
+#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0)
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE_S 1
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR 0x00000004
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_CORR_ERR_S 2
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE 0x00000008
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_ACTIVE_S 3
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX 0x000001f0
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_RX_GAIN_IDX_S 4
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
+#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
+
+#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4)
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX_S 16
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
+#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
+
+#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8)
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
+#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
+
+#define AR_PHY_PAPRD_MEM_TAB_B0 (AR_CHAN_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B1 (AR_CHAN1_BASE + 0x120)
+#define AR_PHY_PAPRD_MEM_TAB_B2 (AR_CHAN2_BASE + 0x120)
+
+#define AR_PHY_PA_GAIN123_B0 (AR_CHAN_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B1 (AR_CHAN1_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_B2 (AR_CHAN2_BASE + 0xf8)
+#define AR_PHY_PA_GAIN123_PA_GAIN1 0x3FF
+#define AR_PHY_PA_GAIN123_PA_GAIN1_S 0
+
+#define AR_PHY_POWERTX_RATE5 (AR_SM_BASE + 0x1d0)
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
+#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index fbb7dec6ddeb..8d163ae4255e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -20,6 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/device.h>
#include <linux/leds.h>
+#include <linux/completion.h>
#include "debug.h"
#include "common.h"
@@ -136,6 +137,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_MAX_ANTENNA 3
#define ATH_RXBUF 512
#define ATH_TXBUF 512
+#define ATH_TXBUF_RESERVE 5
+#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
#define ATH_TXMAXTRY 13
#define ATH_MGT_TXMAXTRY 4
@@ -192,6 +195,7 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
+ int axq_class;
u32 axq_qnum;
u32 *axq_link;
struct list_head axq_q;
@@ -206,6 +210,70 @@ struct ath_txq {
u8 txq_tailidx;
};
+struct ath_atx_ac {
+ int sched;
+ int qnum;
+ struct list_head list;
+ struct list_head tid_q;
+};
+
+struct ath_buf_state {
+ int bfs_nframes;
+ u16 bfs_al;
+ u16 bfs_frmlen;
+ int bfs_seqno;
+ int bfs_tidno;
+ int bfs_retries;
+ u8 bf_type;
+ u8 bfs_paprd;
+ u32 bfs_keyix;
+ enum ath9k_key_type bfs_keytype;
+};
+
+struct ath_buf {
+ struct list_head list;
+ struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
+ an aggregate) */
+ struct ath_buf *bf_next; /* next subframe in the aggregate */
+ struct sk_buff *bf_mpdu; /* enclosing frame structure */
+ void *bf_desc; /* virtual addr of desc */
+ dma_addr_t bf_daddr; /* physical addr of desc */
+ dma_addr_t bf_buf_addr; /* physical addr of data buffer */
+ bool bf_stale;
+ bool bf_isnullfunc;
+ bool bf_tx_aborted;
+ u16 bf_flags;
+ struct ath_buf_state bf_state;
+ dma_addr_t bf_dmacontext;
+ struct ath_wiphy *aphy;
+};
+
+struct ath_atx_tid {
+ struct list_head list;
+ struct list_head buf_q;
+ struct ath_node *an;
+ struct ath_atx_ac *ac;
+ struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
+ u16 seq_start;
+ u16 seq_next;
+ u16 baw_size;
+ int tidno;
+ int baw_head; /* first un-acked tx buffer */
+ int baw_tail; /* next unused tx buffer slot */
+ int sched;
+ int paused;
+ u8 state;
+};
+
+struct ath_node {
+ struct ath_common *common;
+ struct ath_atx_tid tid[WME_NUM_TID];
+ struct ath_atx_ac ac[WME_NUM_AC];
+ u16 maxampdu;
+ u8 mpdudensity;
+ int last_rssi;
+};
+
#define AGGR_CLEANUP BIT(1)
#define AGGR_ADDBA_COMPLETE BIT(2)
#define AGGR_ADDBA_PROGRESS BIT(3)
@@ -214,6 +282,7 @@ struct ath_tx_control {
struct ath_txq *txq;
int if_id;
enum ath9k_internal_frame_type frame_type;
+ u8 paprd;
};
#define ATH_TX_ERROR 0x01
@@ -223,11 +292,12 @@ struct ath_tx_control {
struct ath_tx {
u16 seq_no;
u32 txqsetup;
- int hwq_map[ATH9K_WME_AC_VO+1];
+ int hwq_map[WME_NUM_AC];
spinlock_t txbuflock;
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
+ int pending_frames[WME_NUM_AC];
};
struct ath_rx_edma {
@@ -267,7 +337,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs);
void ath_tx_cleanup(struct ath_softc *sc);
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -351,10 +420,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
-#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
+#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
/**********/
@@ -486,6 +557,9 @@ struct ath_softc {
spinlock_t sc_serial_rw;
spinlock_t sc_pm_lock;
struct mutex mutex;
+ struct work_struct paprd_work;
+ struct completion paprd_complete;
+ int paprd_txok;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
@@ -544,7 +618,6 @@ struct ath_wiphy {
void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
int ath_cabq_update(struct ath_softc *);
@@ -560,8 +633,6 @@ irqreturn_t ath_isr(int irq, void *dev);
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
-const char *ath_mac_bb_name(u32 mac_bb_version);
-const char *ath_rf_name(u16 rf_version);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
@@ -612,8 +683,6 @@ void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
-
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index f43d85a302c4..4d4b22d52dfd 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -38,8 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc)
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
- ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 7707341cd0d3..16e2849f644d 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,270 +27,6 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
-/* Common RX processing */
-
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rxs,
- struct ath_rx_status *rx_stats,
- bool *decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = hdr->frame_control;
-
- if (!rx_stats->rs_datalen)
- return false;
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
- if (rx_stats->rs_datalen > common->rx_bufsize)
- return false;
-
- /*
- * rs_more indicates chained descriptors which can be used
- * to link buffers together for a sort of scatter-gather
- * operation.
- * reject the frame, we don't support scatter-gather yet and
- * the frame is probably corrupt anyway
- */
- if (rx_stats->rs_more)
- return false;
-
- /*
- * The rx_stats->rs_status will not be set until the end of the
- * chained descriptors so it can be ignored if rs_more is set. The
- * rs_more will be false at the last element of the chained
- * descriptors.
- */
- if (rx_stats->rs_status != 0) {
- if (rx_stats->rs_status & ATH9K_RXERR_CRC)
- rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_stats->rs_status & ATH9K_RXERR_PHY)
- return false;
-
- if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
- *decrypt_error = true;
- } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
- else
- rxs->flag |= RX_FLAG_MMIC_ERROR;
- }
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- if (ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (rx_stats->rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- return false;
- } else {
- if (rx_stats->rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- return false;
- }
- }
- }
- return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- struct sk_buff *skb)
-{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
-
- band = hw->conf.channel->band;
- sband = hw->wiphy->bands[band];
-
- if (rx_stats->rs_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- if (rx_stats->rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rx_stats->rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
- rxs->rate_idx = rx_stats->rs_rate & 0x7f;
- return 0;
- }
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
- rxs->rate_idx = i;
- return 0;
- }
- if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
- rxs->rate_idx = i;
- return 0;
- }
- }
-
- /*
- * No valid hardware bitrate found -- we should not get here
- * because hardware has already validated this frame as OK.
- */
- ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
- "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
- if ((common->debug_mask & ATH_DBG_XMIT))
- print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
-
- return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_sta *sta;
- struct ieee80211_hdr *hdr;
- struct ath_node *an;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
- __le16 fc;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
-
- rcu_read_lock();
- /*
- * XXX: use ieee80211_find_sta! This requires quite a bit of work
- * under the current ath9k virtual wiphy implementation as we have
- * no way of tying a vif to wiphy. Typically vifs are attached to
- * at least one sdata of a wiphy on mac80211 but with ath9k virtual
- * wiphy you'd have to iterate over every wiphy and each sdata.
- */
- sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
- if (sta) {
- an = (struct ath_node *) sta->drv_priv;
- if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
- !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
- last_rssi = an->last_rssi;
- }
- rcu_read_unlock();
-
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
- if (rx_stats->rs_rssi < 0)
- rx_stats->rs_rssi = 0;
-
- /* Update Beacon RSSI, this is used by ANI. */
- if (ieee80211_is_beacon(fc))
- ah->stats.avgbrssi = rx_stats->rs_rssi;
-}
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rx_status,
- bool *decrypt_error)
-{
- struct ath_hw *ah = common->ah;
-
- memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
-
- /*
- * everything but the rate is checked here, the rate check is done
- * separately to avoid doing two lookups for a rate for each frame.
- */
- if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
- return -EINVAL;
-
- ath9k_process_rssi(common, hw, skb, rx_stats);
-
- if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
- return -EINVAL;
-
- rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
- rx_status->band = hw->conf.channel->band;
- rx_status->freq = hw->conf.channel->center_freq;
- rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
- rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_TSFT;
-
- return 0;
-}
-EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess);
-
-void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ath9k_cmn_padpos(hdr->frame_control);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
-
- keyix = rx_stats->rs_keyix;
-
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
-EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
-
int ath9k_cmn_padpos(__le16 frame_control)
{
int padpos = 24;
@@ -475,10 +211,14 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
return -1;
}
-static int ath_reserve_key_cache_slot(struct ath_common *common)
+static int ath_reserve_key_cache_slot(struct ath_common *common,
+ enum ieee80211_key_alg alg)
{
int i;
+ if (alg == ALG_TKIP)
+ return ath_reserve_key_cache_slot_tkip(common);
+
/* First, try to find slots that would not be available for TKIP. */
if (common->splitmic) {
for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
@@ -547,6 +287,7 @@ int ath9k_cmn_key_config(struct ath_common *common,
struct ath_hw *ah = common->ah;
struct ath9k_keyval hk;
const u8 *mac = NULL;
+ u8 gmac[ETH_ALEN];
int ret = 0;
int idx;
@@ -570,9 +311,23 @@ int ath9k_cmn_key_config(struct ath_common *common,
memcpy(hk.kv_val, key->key, key->keylen);
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- /* For now, use the default keys for broadcast keys. This may
- * need to change with virtual interfaces. */
- idx = key->keyidx;
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ memcpy(gmac, vif->addr, ETH_ALEN);
+ gmac[0] |= 0x01;
+ mac = gmac;
+ idx = ath_reserve_key_cache_slot(common, key->alg);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ memcpy(gmac, sta->addr, ETH_ALEN);
+ gmac[0] |= 0x01;
+ mac = gmac;
+ idx = ath_reserve_key_cache_slot(common, key->alg);
+ break;
+ default:
+ idx = key->keyidx;
+ break;
+ }
} else if (key->keyidx) {
if (WARN_ON(!sta))
return -EOPNOTSUPP;
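For the AP and IBSS group-key paths above, the key-cache MAC is simply the interface (or peer) address with the group/multicast bit set. A tiny illustration with a made-up address:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	/* made-up interface address, for illustration only */
	unsigned char addr[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	unsigned char gmac[ETH_ALEN];

	memcpy(gmac, addr, ETH_ALEN);
	gmac[0] |= 0x01;	/* set the group/multicast bit, as in the AP/IBSS cases */

	/* prints 01:03:7f:12:34:56 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       gmac[0], gmac[1], gmac[2], gmac[3], gmac[4], gmac[5]);
	return 0;
}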
@@ -589,14 +344,12 @@ int ath9k_cmn_key_config(struct ath_common *common,
return -EOPNOTSUPP;
mac = sta->addr;
- if (key->alg == ALG_TKIP)
- idx = ath_reserve_key_cache_slot_tkip(common);
- else
- idx = ath_reserve_key_cache_slot(common);
- if (idx < 0)
- return -ENOSPC; /* no free key cache entries */
+ idx = ath_reserve_key_cache_slot(common, key->alg);
}
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+
if (key->alg == ALG_TKIP)
ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
vif->type == NL80211_IFTYPE_AP);
@@ -644,6 +397,19 @@ void ath9k_cmn_key_delete(struct ath_common *common,
}
EXPORT_SYMBOL(ath9k_cmn_key_delete);
+int ath9k_cmn_count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+EXPORT_SYMBOL(ath9k_cmn_count_streams);
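ath9k_cmn_count_streams() is a population count over the chainmask, capped at max: a two-chain mask such as 0x5 yields 2 streams, and a three-chain mask 0x7 with max = 2 is capped at 2. A quick check of the same loop in isolation:

#include <stdio.h>

/* Same popcount-with-cap loop as ath9k_cmn_count_streams() above. */
static int count_streams(unsigned int chainmask, int max)
{
	int streams = 0;

	do {
		if (++streams == max)
			break;
	} while ((chainmask = chainmask & (chainmask - 1)));

	return streams;
}

int main(void)
{
	/* prints "2 2" */
	printf("%d %d\n", count_streams(0x5, 3), count_streams(0x7, 2));
	return 0;
}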
+
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index e08f7e5a26e0..97809d39c73f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -52,82 +52,6 @@
#define ATH_EP_RND(x, mul) \
((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
-struct ath_atx_ac {
- int sched;
- int qnum;
- struct list_head list;
- struct list_head tid_q;
-};
-
-struct ath_buf_state {
- int bfs_nframes;
- u16 bfs_al;
- u16 bfs_frmlen;
- int bfs_seqno;
- int bfs_tidno;
- int bfs_retries;
- u8 bf_type;
- u32 bfs_keyix;
- enum ath9k_key_type bfs_keytype;
-};
-
-struct ath_buf {
- struct list_head list;
- struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
- an aggregate) */
- struct ath_buf *bf_next; /* next subframe in the aggregate */
- struct sk_buff *bf_mpdu; /* enclosing frame structure */
- void *bf_desc; /* virtual addr of desc */
- dma_addr_t bf_daddr; /* physical addr of desc */
- dma_addr_t bf_buf_addr; /* physical addr of data buffer */
- bool bf_stale;
- bool bf_isnullfunc;
- bool bf_tx_aborted;
- u16 bf_flags;
- struct ath_buf_state bf_state;
- dma_addr_t bf_dmacontext;
- struct ath_wiphy *aphy;
-};
-
-struct ath_atx_tid {
- struct list_head list;
- struct list_head buf_q;
- struct ath_node *an;
- struct ath_atx_ac *ac;
- struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
- u16 seq_start;
- u16 seq_next;
- u16 baw_size;
- int tidno;
- int baw_head; /* first un-acked tx buffer */
- int baw_tail; /* next unused tx buffer slot */
- int sched;
- int paused;
- u8 state;
-};
-
-struct ath_node {
- struct ath_common *common;
- struct ath_atx_tid tid[WME_NUM_TID];
- struct ath_atx_ac ac[WME_NUM_AC];
- u16 maxampdu;
- u8 mpdudensity;
- int last_rssi;
-};
-
-int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rx_status,
- bool *decrypt_error);
-
-void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error);
-
int ath9k_cmn_padpos(__le16 frame_control);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
@@ -140,3 +64,4 @@ int ath9k_cmn_key_config(struct ath_common *common,
struct ieee80211_key_conf *key);
void ath9k_cmn_key_delete(struct ath_common *common,
struct ieee80211_key_conf *key);
+int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 29898f8d1893..54aae931424e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -42,7 +42,7 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
char buf[32];
unsigned int len;
- len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask);
+ len = sprintf(buf, "0x%08x\n", common->debug_mask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -57,7 +57,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
- return -EINVAL;
+ return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &mask))
@@ -86,7 +86,7 @@ static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
char buf[32];
unsigned int len;
- len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
+ len = sprintf(buf, "0x%08x\n", common->tx_chainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -101,7 +101,7 @@ static ssize_t write_file_tx_chainmask(struct file *file, const char __user *use
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
- return -EINVAL;
+ return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &mask))
@@ -128,7 +128,7 @@ static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
char buf[32];
unsigned int len;
- len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
+ len = sprintf(buf, "0x%08x\n", common->rx_chainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -143,7 +143,7 @@ static ssize_t write_file_rx_chainmask(struct file *file, const char __user *use
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
- return -EINVAL;
+ return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &mask))
@@ -176,7 +176,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
if (!buf)
- return 0;
+ return -ENOMEM;
ath9k_ps_wakeup(sc);
@@ -248,6 +248,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
ath9k_ps_restore(sc);
+ if (len > DMA_BUF_LEN)
+ len = DMA_BUF_LEN;
+
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return retval;
@@ -269,6 +272,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.rxlp++;
if (status & ATH9K_INT_RXHP)
sc->debug.stats.istats.rxhp++;
+ if (status & ATH9K_INT_BB_WATCHDOG)
+ sc->debug.stats.istats.bb_watchdog++;
} else {
if (status & ATH9K_INT_RX)
sc->debug.stats.istats.rxok++;
@@ -319,6 +324,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
"%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "WATCHDOG",
+ sc->debug.stats.istats.bb_watchdog);
} else {
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
@@ -358,6 +366,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -397,11 +408,10 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (sc->cur_rate_table == NULL)
return 0;
- max = 80 + sc->cur_rate_table->rate_cnt * 1024;
- buf = kmalloc(max + 1, GFP_KERNEL);
+ max = 80 + sc->cur_rate_table->rate_cnt * 1024 + 1;
+ buf = kmalloc(max, GFP_KERNEL);
if (buf == NULL)
- return 0;
- buf[max] = 0;
+ return -ENOMEM;
len += sprintf(buf, "%6s %6s %6s "
"%10s %10s %10s %10s\n",
@@ -443,6 +453,9 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
stats->per);
}
+ if (len > max)
+ len = max;
+
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return retval;
@@ -505,6 +518,9 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
len += snprintf(buf + len, sizeof(buf) - len,
"addrmask: %pM\n", addr);
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -614,10 +630,10 @@ static const struct file_operations fops_wiphy = {
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BE]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_BK]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VI]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[ATH9K_WME_AC_VO]].elem); \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
+ sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
} while(0)
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,7 +646,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
- return 0;
+ return -ENOMEM;
len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
@@ -648,6 +664,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("DATA Underrun: ", data_underrun);
PR("DELIM Underrun: ", delim_underrun);
+ if (len > size)
+ len = size;
+
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
@@ -700,7 +719,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
- return 0;
+ return -ENOMEM;
len += snprintf(buf + len, size - len,
"%18s : %10u\n", "CRC ERR",
@@ -751,6 +770,9 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+ if (len > size)
+ len = size;
+
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
@@ -802,7 +824,7 @@ static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
char buf[32];
unsigned int len;
- len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
+ len = sprintf(buf, "0x%08x\n", sc->debug.regidx);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -816,7 +838,7 @@ static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
- return -EINVAL;
+ return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &regidx))
@@ -843,7 +865,7 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf,
u32 regval;
regval = REG_READ_D(ah, sc->debug.regidx);
- len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
+ len = sprintf(buf, "0x%08x\n", regval);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -858,7 +880,7 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
- return -EINVAL;
+ return -EFAULT;
buf[len] = '\0';
if (strict_strtoul(buf, 0, &regval))
@@ -934,6 +956,10 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, sc, &fops_regval))
goto err;
+ if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
+ goto err;
+
sc->debug.regidx = 0;
return 0;
err:
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 5147b8709e10..5d21704e87ff 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@ struct ath_buf;
* @cabend: RX End of CAB traffic
* @dtimsync: DTIM sync lossage
* @dtim: RX Beacon with DTIM
+ * @bb_watchdog: Baseband watchdog
*/
struct ath_interrupt_stats {
u32 total;
@@ -76,6 +77,7 @@ struct ath_interrupt_stats {
u32 cabend;
u32 dtimsync;
u32 dtim;
+ u32 bb_watchdog;
};
struct ath_rc_stats {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index ca8704a9d7ac..1266333f586d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -24,6 +24,14 @@ static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
}
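The fbin encoding in ath9k_hw_fbin2freq() above stores 2 GHz channels as a 1 MHz-step offset from 2300 MHz and 5 GHz channels as a 5 MHz-step offset from 4800 MHz, so fbin 112 decodes to 2412 MHz (channel 1) and fbin 72 to 5160 MHz. A standalone restatement:

#include <stdio.h>
#include <stdbool.h>

/* Same mapping as ath9k_hw_fbin2freq(): 1 MHz steps from 2300 MHz in
 * 2 GHz, 5 MHz steps from 4800 MHz in 5 GHz. */
static unsigned short fbin2freq(unsigned char fbin, bool is2GHz)
{
	return is2GHz ? 2300 + fbin : 4800 + 5 * fbin;
}

int main(void)
{
	/* prints "2412 5160" */
	printf("%u %u\n", fbin2freq(112, true), fbin2freq(72, false));
	return 0;
}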
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
+{
+ REG_WRITE(ah, reg, val);
+
+ if (ah->config.analog_shiftreg)
+ udelay(100);
+}
+
void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
u32 shift, u32 val)
{
@@ -250,6 +258,27 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
return twiceMaxEdgePower;
}
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+
+ switch (ar5416_get_ntxchains(ah->txchainmask)) {
+ case 1:
+ break;
+ case 2:
+ regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
+ break;
+ case 3:
+ regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
+ break;
+ default:
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid chainmask configuration\n");
+ break;
+ }
+}
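The per-chain offsets applied above presumably encode the array gain of transmitting the same per-chain power from N chains, 2 * 10 * log10(N) in half-dB steps, which rounds to 6 for two chains (~3 dB) and 10 for three (~4.8 dB); this is an assumption, since the INCREASE_MAXPOW_BY_* definitions are outside this hunk. The arithmetic, for reference (link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* array gain in half-dB steps for 2 and 3 tx chains: ~6.0 and ~9.5 */
	printf("%.1f %.1f\n", 2 * 10 * log10(2.0), 2 * 10 * log10(3.0));
	return 0;
}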
+
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 21354c15a9a9..bdd8aa054b80 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -263,7 +263,8 @@ enum eeprom_param {
EEP_PWR_TABLE_OFFSET,
EEP_DRIVE_STRENGTH,
EEP_INTERNAL_REGULATOR,
- EEP_SWREG
+ EEP_SWREG,
+ EEP_PAPRD,
};
enum ar5416_rates {
@@ -679,6 +680,7 @@ struct eeprom_ops {
u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
};
+void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
u32 shift, u32 val);
int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -704,6 +706,7 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
u16 numRates, bool isHt40Target);
u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
bool is2GHz, int num_band_edges);
+void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
int ath9k_hw_eeprom_init(struct ath_hw *ah);
#define ar5416_get_ntxchains(_txchainmask) \
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 41a77d1bd439..e25a2abbf561 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -249,6 +249,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
struct chan_centers centers;
#define PD_GAIN_BOUNDARY_DEFAULT 58;
+ memset(minPwrT4, 0, sizeof(minPwrT4));
ath9k_hw_get_channel_centers(ah, chan, &centers);
for (numPiers = 0; numPiers < availPiers; numPiers++) {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index b471db5fb82d..39a41053705f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,17 +17,19 @@
#include "hw.h"
#include "ar9002_phy.h"
-static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
+#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+
+static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
{
return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
}
-static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah)
+static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
{
return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
}
-static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct ath_common *common = ath9k_hw_common(ah);
@@ -40,20 +42,20 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
"Reading from EEPROM, not flash\n");
}
- for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
- addr++) {
- if (!ath9k_hw_nvram_read(common,
- addr + eep_start_loc, eep_data)) {
+ for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
+ eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
"Unable to read eeprom region\n");
return false;
}
eep_data++;
}
+
return true;
}
-static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
+static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
u32 sum = 0, el, integer;
u16 temp, word, magic, magic2, *eepdata;
@@ -63,8 +65,8 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- if (!ath9k_hw_nvram_read(common,
- AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
+ if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
+ &magic)) {
ath_print(common, ATH_DBG_FATAL,
"Reading Magic # failed\n");
return false;
@@ -72,6 +74,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
ath_print(common, ATH_DBG_EEPROM,
"Read Magic = 0x%04X\n", magic);
+
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -79,9 +82,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
need_swap = true;
eepdata = (u16 *)(&ah->eeprom);
- for (addr = 0;
- addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
- addr++) {
+ for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
@@ -89,13 +90,14 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
} else {
ath_print(common, ATH_DBG_FATAL,
"Invalid EEPROM Magic. "
- "endianness mismatch.\n");
+ "Endianness mismatch.\n");
return -EINVAL;
}
}
}
- ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
- "True" : "False");
+
+ ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
if (need_swap)
el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -108,6 +110,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
el = el / sizeof(u16);
eepdata = (u16 *)(&ah->eeprom);
+
for (i = 0; i < el; i++)
sum ^= *eepdata++;
@@ -161,7 +164,7 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
return 0;
}
-static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
+static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
enum eeprom_param param)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -170,6 +173,7 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
u16 ver_minor;
ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
+
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
@@ -214,29 +218,30 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
}
}
-
-static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- struct cal_data_per_freq_ar9287 *pRawDataSet,
- u8 *bChans, u16 availPiers,
- u16 tPdGainOverlap, int16_t *pMinCalPower,
- u16 *pPdGainBoundaries, u8 *pPDADCValues,
- u16 numXpdGains)
+static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ struct cal_data_per_freq_ar9287 *pRawDataSet,
+ u8 *bChans, u16 availPiers,
+ u16 tPdGainOverlap,
+ int16_t *pMinCalPower,
+ u16 *pPdGainBoundaries,
+ u8 *pPDADCValues,
+ u16 numXpdGains)
{
-#define TMP_VAL_VPD_TABLE \
+#define TMP_VAL_VPD_TABLE \
((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
- int i, j, k;
- int16_t ss;
- u16 idxL = 0, idxR = 0, numPiers;
- u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
- u8 minPwrT4[AR9287_NUM_PD_GAINS];
- u8 maxPwrT4[AR9287_NUM_PD_GAINS];
- int16_t vpdStep;
- int16_t tmpVal;
- u16 sizeCurrVpdTable, maxIndex, tgtIndex;
- bool match;
- int16_t minDelta = 0;
+ int i, j, k;
+ int16_t ss;
+ u16 idxL = 0, idxR = 0, numPiers;
+ u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
+ u8 minPwrT4[AR9287_NUM_PD_GAINS];
+ u8 maxPwrT4[AR9287_NUM_PD_GAINS];
+ int16_t vpdStep;
+ int16_t tmpVal;
+ u16 sizeCurrVpdTable, maxIndex, tgtIndex;
+ bool match;
+ int16_t minDelta = 0;
struct chan_centers centers;
static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
@@ -245,6 +250,7 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
ath9k_hw_get_channel_centers(ah, chan, &centers);
for (numPiers = 0; numPiers < availPiers; numPiers++) {
@@ -253,18 +259,18 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
}
match = ath9k_hw_get_lower_upper_index(
- (u8)FREQ2FBIN(centers.synth_center,
- IS_CHAN_2GHZ(chan)), bChans, numPiers,
- &idxL, &idxR);
+ (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+ bChans, numPiers, &idxL, &idxR);
if (match) {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pRawDataSet[idxL].pwrPdg[i],
- pRawDataSet[idxL].vpdPdg[i],
- AR9287_PD_GAIN_ICEPTS, vpdTableI[i]);
+ pRawDataSet[idxL].pwrPdg[i],
+ pRawDataSet[idxL].vpdPdg[i],
+ AR9287_PD_GAIN_ICEPTS,
+ vpdTableI[i]);
}
} else {
for (i = 0; i < numXpdGains; i++) {
@@ -275,61 +281,59 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
- maxPwrT4[i] =
- min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
- pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
+ maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
+ pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrL, pVpdL,
- AR9287_PD_GAIN_ICEPTS,
- vpdTableL[i]);
+ pPwrL, pVpdL,
+ AR9287_PD_GAIN_ICEPTS,
+ vpdTableL[i]);
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrR, pVpdR,
- AR9287_PD_GAIN_ICEPTS,
- vpdTableR[i]);
+ pPwrR, pVpdR,
+ AR9287_PD_GAIN_ICEPTS,
+ vpdTableR[i]);
for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
- vpdTableI[i][j] =
- (u8)(ath9k_hw_interpolate((u16)
- FREQ2FBIN(centers. synth_center,
- IS_CHAN_2GHZ(chan)),
- bChans[idxL], bChans[idxR],
- vpdTableL[i][j], vpdTableR[i][j]));
+ vpdTableI[i][j] = (u8)(ath9k_hw_interpolate(
+ (u16)FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan)),
+ bChans[idxL], bChans[idxR],
+ vpdTableL[i][j], vpdTableR[i][j]));
}
}
}
- *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
+ *pMinCalPower = (int16_t)(minPwrT4[0] / 2);
k = 0;
+
for (i = 0; i < numXpdGains; i++) {
if (i == (numXpdGains - 1))
- pPdGainBoundaries[i] = (u16)(maxPwrT4[i] / 2);
+ pPdGainBoundaries[i] =
+ (u16)(maxPwrT4[i] / 2);
else
- pPdGainBoundaries[i] = (u16)((maxPwrT4[i] +
- minPwrT4[i+1]) / 4);
+ pPdGainBoundaries[i] =
+ (u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4);
pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER,
- pPdGainBoundaries[i]);
+ pPdGainBoundaries[i]);
- if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
- minDelta = pPdGainBoundaries[0] - 23;
- pPdGainBoundaries[0] = 23;
- } else
- minDelta = 0;
+ minDelta = 0;
if (i == 0) {
if (AR_SREV_9280_10_OR_LATER(ah))
ss = (int16_t)(0 - (minPwrT4[i] / 2));
else
ss = 0;
- } else
+ } else {
ss = (int16_t)((pPdGainBoundaries[i-1] -
- (minPwrT4[i] / 2)) -
+ (minPwrT4[i] / 2)) -
tPdGainOverlap + 1 + minDelta);
+ }
vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) {
tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
@@ -348,12 +352,13 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
vpdTableI[i][sizeCurrVpdTable - 2]);
vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
if (tgtIndex > maxIndex) {
while ((ss <= tgtIndex) &&
(k < (AR9287_NUM_PDADC_VALUES - 1))) {
tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
- pPDADCValues[k++] = (u8)((tmpVal > 255) ?
- 255 : tmpVal);
+ pPDADCValues[k++] =
+ (u8)((tmpVal > 255) ? 255 : tmpVal);
ss++;
}
}
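
The vpdTableI[i][j] assignment earlier in this function boils down to linear interpolation between the left and right calibration piers. A standalone sketch of that arithmetic, with interp() as a local stand-in for ath9k_hw_interpolate():

#include <stdint.h>

/* Linear interpolation of a calibration value between two channel piers. */
static int16_t interp(uint16_t target, uint16_t left, uint16_t right,
		      int16_t val_left, int16_t val_right)
{
	if (right == left)	/* degenerate span: both piers at one channel */
		return val_left;

	return (int16_t)(((target - left) * val_right +
			  (right - target) * val_left) / (right - left));
}
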
@@ -375,10 +380,9 @@ static void ath9k_hw_get_AR9287_gain_boundaries_pdadcs(struct ath_hw *ah,
static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
- u8 *pCalChans, u16 availPiers,
- int8_t *pPwr)
+ u8 *pCalChans, u16 availPiers, int8_t *pPwr)
{
- u16 idxL = 0, idxR = 0, numPiers;
+ u16 idxL = 0, idxR = 0, numPiers;
bool match;
struct chan_centers centers;
@@ -390,15 +394,14 @@ static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
}
match = ath9k_hw_get_lower_upper_index(
- (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
- pCalChans, numPiers,
- &idxL, &idxR);
+ (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
+ pCalChans, numPiers, &idxL, &idxR);
if (match) {
*pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0];
} else {
*pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] +
- (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
+ (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
}
}
@@ -409,16 +412,22 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
u32 tmpVal;
u32 a;
+ /* Enable OLPC for chain 0 */
+
tmpVal = REG_READ(ah, 0xa270);
tmpVal = tmpVal & 0xFCFFFFFF;
tmpVal = tmpVal | (0x3 << 24);
REG_WRITE(ah, 0xa270, tmpVal);
+ /* Enable OLPC for chain 1 */
+
tmpVal = REG_READ(ah, 0xb270);
tmpVal = tmpVal & 0xFCFFFFFF;
tmpVal = tmpVal | (0x3 << 24);
REG_WRITE(ah, 0xb270, tmpVal);
+ /* Write the OLPC ref power for chain 0 */
+
if (chain == 0) {
tmpVal = REG_READ(ah, 0xa398);
tmpVal = tmpVal & 0xff00ffff;
@@ -427,6 +436,8 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
REG_WRITE(ah, 0xa398, tmpVal);
}
+ /* Write the OLPC ref power for chain 1 */
+
if (chain == 1) {
tmpVal = REG_READ(ah, 0xb398);
tmpVal = tmpVal & 0xff00ffff;
@@ -436,28 +447,29 @@ static void ar9287_eeprom_olpc_set_pdadcs(struct ath_hw *ah,
}
}
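
The chain 0/1 OLPC enables above all follow one register idiom: read, clear the field with an AND mask, OR in the new value, write back. A standalone sketch of that pattern; reg_read()/reg_write() are hypothetical stand-ins for REG_READ()/REG_WRITE().

#include <stdint.h>

static uint32_t shadow;			/* stand-in register backing store */

static uint32_t reg_read(uint32_t addr)	{ (void)addr; return shadow; }
static void reg_write(uint32_t addr, uint32_t val) { (void)addr; shadow = val; }

/* Clear 'mask', then set 'bits' (which must lie inside 'mask'). */
static void reg_rmw(uint32_t addr, uint32_t mask, uint32_t bits)
{
	uint32_t val = reg_read(addr);

	val &= ~mask;
	val |= bits;
	reg_write(addr, val);
}

/* Equivalent of: tmpVal &= 0xFCFFFFFF; tmpVal |= (0x3 << 24); */
static void enable_olpc(uint32_t addr)
{
	reg_rmw(addr, 0x3u << 24, 0x3u << 24);
}
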
-static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
+static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *pTxPowerIndexOffset)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct cal_data_per_freq_ar9287 *pRawDataset;
struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
- u8 *pCalBChans = NULL;
+ u8 *pCalBChans = NULL;
u16 pdGainOverlap_t2;
- u8 pdadcValues[AR9287_NUM_PDADC_VALUES];
+ u8 pdadcValues[AR9287_NUM_PDADC_VALUES];
u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK];
u16 numPiers = 0, i, j;
- int16_t tMinCalPower;
+ int16_t tMinCalPower;
u16 numXpdGain, xpdMask;
u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0};
- u32 reg32, regOffset, regChainOffset;
- int16_t modalIdx, diff = 0;
+ u32 reg32, regOffset, regChainOffset, regval;
+ int16_t modalIdx, diff = 0;
struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
+
modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
xpdMask = pEepData->modalHeader.xpdGain;
+
if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
- AR9287_EEP_MINOR_VER_2)
+ AR9287_EEP_MINOR_VER_2)
pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
else
pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
@@ -466,15 +478,16 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
if (IS_CHAN_2GHZ(chan)) {
pCalBChans = pEepData->calFreqPier2G;
numPiers = AR9287_NUM_2G_CAL_PIERS;
- if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
pRawDatasetOpenLoop =
- (struct cal_data_op_loop_ar9287 *)
- pEepData->calPierData2G[0];
+ (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0];
ah->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0];
}
}
numXpdGain = 0;
+
+ /* Calculate the value of xpdgains from the xpdGain Mask */
for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) {
if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) {
if (numXpdGain >= AR9287_NUM_PD_GAINS)
@@ -496,99 +509,80 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
for (i = 0; i < AR9287_MAX_CHAINS; i++) {
regChainOffset = i * 0x1000;
+
if (pEepData->baseEepHeader.txMask & (1 << i)) {
- pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *)
- pEepData->calPierData2G[i];
- if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ pRawDatasetOpenLoop =
+ (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i];
+
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
int8_t txPower;
ar9287_eeprom_get_tx_gain_index(ah, chan,
- pRawDatasetOpenLoop,
- pCalBChans, numPiers,
- &txPower);
+ pRawDatasetOpenLoop,
+ pCalBChans, numPiers,
+ &txPower);
ar9287_eeprom_olpc_set_pdadcs(ah, txPower, i);
} else {
pRawDataset =
(struct cal_data_per_freq_ar9287 *)
pEepData->calPierData2G[i];
- ath9k_hw_get_AR9287_gain_boundaries_pdadcs(
- ah, chan, pRawDataset,
- pCalBChans, numPiers,
- pdGainOverlap_t2,
- &tMinCalPower, gainBoundaries,
- pdadcValues, numXpdGain);
+
+ ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan,
+ pRawDataset,
+ pCalBChans, numPiers,
+ pdGainOverlap_t2,
+ &tMinCalPower,
+ gainBoundaries,
+ pdadcValues,
+ numXpdGain);
}
if (i == 0) {
- if (!ath9k_hw_AR9287_get_eeprom(
- ah, EEP_OL_PWRCTRL)) {
- REG_WRITE(ah, AR_PHY_TPCRG5 +
- regChainOffset,
- SM(pdGainOverlap_t2,
- AR_PHY_TPCRG5_PD_GAIN_OVERLAP) |
- SM(gainBoundaries[0],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
- | SM(gainBoundaries[1],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
- | SM(gainBoundaries[2],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
- | SM(gainBoundaries[3],
- AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
+ if (!ath9k_hw_ar9287_get_eeprom(ah,
+ EEP_OL_PWRCTRL)) {
+
+ regval = SM(pdGainOverlap_t2,
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
+ | SM(gainBoundaries[0],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
+ | SM(gainBoundaries[1],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
+ | SM(gainBoundaries[2],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
+ | SM(gainBoundaries[3],
+ AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4);
+
+ REG_WRITE(ah,
+ AR_PHY_TPCRG5 + regChainOffset,
+ regval);
}
}
if ((int32_t)AR9287_PWR_TABLE_OFFSET_DB !=
- pEepData->baseEepHeader.pwrTableOffset) {
- diff = (u16)
- (pEepData->baseEepHeader.pwrTableOffset
- - (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
+ pEepData->baseEepHeader.pwrTableOffset) {
+ diff = (u16)(pEepData->baseEepHeader.pwrTableOffset -
+ (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
diff *= 2;
- for (j = 0;
- j < ((u16)AR9287_NUM_PDADC_VALUES-diff);
- j++)
+ for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++)
pdadcValues[j] = pdadcValues[j+diff];
for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff);
j < AR9287_NUM_PDADC_VALUES; j++)
pdadcValues[j] =
- pdadcValues[
- AR9287_NUM_PDADC_VALUES-diff];
+ pdadcValues[AR9287_NUM_PDADC_VALUES-diff];
}
- if (!ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
- regOffset = AR_PHY_BASE + (672 << 2) +
- regChainOffset;
- for (j = 0; j < 32; j++) {
- reg32 = ((pdadcValues[4*j + 0]
- & 0xFF) << 0) |
- ((pdadcValues[4*j + 1]
- & 0xFF) << 8) |
- ((pdadcValues[4*j + 2]
- & 0xFF) << 16) |
- ((pdadcValues[4*j + 3]
- & 0xFF) << 24) ;
- REG_WRITE(ah, regOffset, reg32);
+ if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ regOffset = AR_PHY_BASE +
+ (672 << 2) + regChainOffset;
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC (%d,%4x): %4.4x "
- "%8.8x\n",
- i, regChainOffset, regOffset,
- reg32);
-
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d |\n",
- i, 4 * j, pdadcValues[4 * j],
- 4 * j + 1,
- pdadcValues[4 * j + 1],
- 4 * j + 2,
- pdadcValues[4 * j + 2],
- 4 * j + 3,
- pdadcValues[4 * j + 3]);
+ for (j = 0; j < 32; j++) {
+ reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
+ | ((pdadcValues[4*j + 1] & 0xFF) << 8)
+ | ((pdadcValues[4*j + 2] & 0xFF) << 16)
+ | ((pdadcValues[4*j + 3] & 0xFF) << 24);
+ REG_WRITE(ah, regOffset, reg32);
regOffset += 4;
}
}
@@ -598,30 +592,45 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
*pTxPowerIndexOffset = 0;
}
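
The register loop just above packs the PDADC table four entries at a time into 32 register words, least significant byte first. A standalone sketch of that packing; pack_pdadc() is a local helper, not driver code.

#include <stdint.h>

/* Pack four consecutive 8-bit PDADC values into one register word,
 * entry 0 in bits 7:0 ... entry 3 in bits 31:24. */
static uint32_t pack_pdadc(const uint8_t *v)
{
	return ((uint32_t)v[0] << 0)  |
	       ((uint32_t)v[1] << 8)  |
	       ((uint32_t)v[2] << 16) |
	       ((uint32_t)v[3] << 24);
}
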
-static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
- struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl,
- u16 AntennaReduction, u16 twiceMaxRegulatoryPower,
- u16 powerLimit)
+static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int16_t *ratesArray,
+ u16 cfgCtl,
+ u16 AntennaReduction,
+ u16 twiceMaxRegulatoryPower,
+ u16 powerLimit)
{
+#define CMP_CTL \
+ (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ pEepData->ctlIndex[i])
+
+#define CMP_NO_CTL \
+ (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
+ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
+
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
+
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
static const u16 tpScaleReductionTable[5] =
{ 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
int i;
- int16_t twiceLargestAntenna;
+ int16_t twiceLargestAntenna;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
targetPowerCck = {0, {0, 0, 0, 0} };
struct cal_target_power_leg targetPowerOfdmExt = {0, {0, 0, 0, 0} },
targetPowerCckExt = {0, {0, 0, 0, 0} };
- struct cal_target_power_ht targetPowerHt20,
+ struct cal_target_power_ht targetPowerHt20,
targetPowerHt40 = {0, {0, 0, 0, 0} };
u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
- u16 ctlModesFor11g[] =
- {CTL_11B, CTL_11G, CTL_2GHT20,
- CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40};
+ u16 ctlModesFor11g[] = {CTL_11B,
+ CTL_11G,
+ CTL_2GHT20,
+ CTL_11B_EXT,
+ CTL_11G_EXT,
+ CTL_2GHT40};
u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq;
struct chan_centers centers;
int tx_chainmask;
@@ -631,19 +640,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
+ /* Compute TxPower reduction due to Antenna Gain */
twiceLargestAntenna = max(pEepData->modalHeader.antennaGainCh[0],
pEepData->modalHeader.antennaGainCh[1]);
+ twiceLargestAntenna = (int16_t)min((AntennaReduction) -
+ twiceLargestAntenna, 0);
- twiceLargestAntenna = (int16_t)min((AntennaReduction) -
- twiceLargestAntenna, 0);
-
+ /*
+ * scaledPower is the minimum of the user input power level
+ * and the regulatory allowed power level.
+ */
maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
+
if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX)
maxRegAllowedPower -=
(tpScaleReductionTable[(regulatory->tp_scale)] * 2);
scaledPower = min(powerLimit, maxRegAllowedPower);
+ /*
+ * Reduce scaled Power by number of chains active
+ * to get the per chain tx power level.
+ */
switch (ar5416_get_ntxchains(tx_chainmask)) {
case 1:
break;
@@ -656,9 +674,14 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
}
scaledPower = max((u16)0, scaledPower);
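
The REDUCE_SCALED_POWER_BY_TWO_CHAIN / _THREE_CHAIN constants used around this switch are the per-chain transmit power backoff in half-dB steps: splitting the same total power across n chains costs roughly 10*log10(n) dB per chain, i.e. about 3 dB (6 half-dB) for two chains and about 5 dB for three. A standalone check of that arithmetic (compile with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
	for (int n = 2; n <= 3; n++)
		printf("%d chains: %.1f half-dB backoff\n",
		       n, 2.0 * 10.0 * log10(n));
	return 0;	/* prints 6.0 and 9.5 */
}
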
+ /*
+ * Get TX power from EEPROM.
+ */
if (IS_CHAN_2GHZ(chan)) {
+ /* CTL_11B, CTL_11G, CTL_2GHT20 */
numCtlModes =
ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
+
pCtlMode = ctlModesFor11g;
ath9k_hw_get_legacy_target_powers(ah, chan,
@@ -675,6 +698,7 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
&targetPowerHt20, 8, false);
if (IS_CHAN_HT40(chan)) {
+ /* All 2G CTLs */
numCtlModes = ARRAY_SIZE(ctlModesFor11g);
ath9k_hw_get_target_powers(ah, chan,
pEepData->calTargetPower2GHT40,
@@ -692,8 +716,9 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
}
for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
- bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
- (pCtlMode[ctlMode] == CTL_2GHT40);
+ bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_2GHT40);
+
if (isHt40CtlMode)
freq = centers.synth_center;
else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
@@ -701,31 +726,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
-
+ /* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
- if ((((cfgCtl & ~CTL_MODE_M) |
- (pCtlMode[ctlMode] & CTL_MODE_M)) ==
- pEepData->ctlIndex[i]) ||
- (((cfgCtl & ~CTL_MODE_M) |
- (pCtlMode[ctlMode] & CTL_MODE_M)) ==
- ((pEepData->ctlIndex[i] &
- CTL_MODE_M) | SD_NO_CTL))) {
+ struct cal_ctl_edges *pRdEdgesPower;
+ /*
+ * Compare test group from regulatory channel list
+ * with test mode from pCtlMode list
+ */
+ if (CMP_CTL || CMP_NO_CTL) {
rep = &(pEepData->ctlData[i]);
- twiceMinEdgePower = ath9k_hw_get_max_edge_power(
- freq,
- rep->ctlEdges[ar5416_get_ntxchains(
- tx_chainmask) - 1],
- IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
-
- if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
- twiceMaxEdgePower = min(
- twiceMaxEdgePower,
- twiceMinEdgePower);
- else {
+ pRdEdgesPower =
+ rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1];
+
+ twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
+ pRdEdgesPower,
+ IS_CHAN_2GHZ(chan),
+ AR5416_NUM_BAND_EDGES);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ twiceMaxEdgePower = min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ } else {
twiceMaxEdgePower = twiceMinEdgePower;
break;
}
@@ -734,55 +756,48 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+ /* Apply ctl mode to correct target power set */
switch (pCtlMode[ctlMode]) {
case CTL_11B:
- for (i = 0;
- i < ARRAY_SIZE(targetPowerCck.tPow2x);
- i++) {
- targetPowerCck.tPow2x[i] = (u8)min(
- (u16)targetPowerCck.tPow2x[i],
- minCtlPower);
+ for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
+ targetPowerCck.tPow2x[i] =
+ (u8)min((u16)targetPowerCck.tPow2x[i],
+ minCtlPower);
}
break;
case CTL_11A:
case CTL_11G:
- for (i = 0;
- i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
- i++) {
- targetPowerOfdm.tPow2x[i] = (u8)min(
- (u16)targetPowerOfdm.tPow2x[i],
- minCtlPower);
+ for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
+ targetPowerOfdm.tPow2x[i] =
+ (u8)min((u16)targetPowerOfdm.tPow2x[i],
+ minCtlPower);
}
break;
case CTL_5GHT20:
case CTL_2GHT20:
- for (i = 0;
- i < ARRAY_SIZE(targetPowerHt20.tPow2x);
- i++) {
- targetPowerHt20.tPow2x[i] = (u8)min(
- (u16)targetPowerHt20.tPow2x[i],
- minCtlPower);
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
+ targetPowerHt20.tPow2x[i] =
+ (u8)min((u16)targetPowerHt20.tPow2x[i],
+ minCtlPower);
}
break;
case CTL_11B_EXT:
- targetPowerCckExt.tPow2x[0] = (u8)min(
- (u16)targetPowerCckExt.tPow2x[0],
- minCtlPower);
+ targetPowerCckExt.tPow2x[0] =
+ (u8)min((u16)targetPowerCckExt.tPow2x[0],
+ minCtlPower);
break;
case CTL_11A_EXT:
case CTL_11G_EXT:
- targetPowerOfdmExt.tPow2x[0] = (u8)min(
- (u16)targetPowerOfdmExt.tPow2x[0],
- minCtlPower);
+ targetPowerOfdmExt.tPow2x[0] =
+ (u8)min((u16)targetPowerOfdmExt.tPow2x[0],
+ minCtlPower);
break;
case CTL_5GHT40:
case CTL_2GHT40:
- for (i = 0;
- i < ARRAY_SIZE(targetPowerHt40.tPow2x);
- i++) {
- targetPowerHt40.tPow2x[i] = (u8)min(
- (u16)targetPowerHt40.tPow2x[i],
- minCtlPower);
+ for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
+ targetPowerHt40.tPow2x[i] =
+ (u8)min((u16)targetPowerHt40.tPow2x[i],
+ minCtlPower);
}
break;
default:
@@ -790,12 +805,13 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
}
}
+ /* Now set the rates array */
+
ratesArray[rate6mb] =
ratesArray[rate9mb] =
ratesArray[rate12mb] =
ratesArray[rate18mb] =
- ratesArray[rate24mb] =
- targetPowerOfdm.tPow2x[0];
+ ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0];
ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
@@ -807,12 +823,12 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
if (IS_CHAN_2GHZ(chan)) {
ratesArray[rate1l] = targetPowerCck.tPow2x[0];
- ratesArray[rate2s] = ratesArray[rate2l] =
- targetPowerCck.tPow2x[1];
- ratesArray[rate5_5s] = ratesArray[rate5_5l] =
- targetPowerCck.tPow2x[2];
- ratesArray[rate11s] = ratesArray[rate11l] =
- targetPowerCck.tPow2x[3];
+ ratesArray[rate2s] =
+ ratesArray[rate2l] = targetPowerCck.tPow2x[1];
+ ratesArray[rate5_5s] =
+ ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
+ ratesArray[rate11s] =
+ ratesArray[rate11l] = targetPowerCck.tPow2x[3];
}
if (IS_CHAN_HT40(chan)) {
for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++)
@@ -821,28 +837,28 @@ static void ath9k_hw_set_AR9287_power_per_rate_table(struct ath_hw *ah,
ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
+
if (IS_CHAN_2GHZ(chan))
ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
}
+#undef CMP_CTL
+#undef CMP_NO_CTL
#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
}
-static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower,
u8 powerLimit)
{
-#define INCREASE_MAXPOW_BY_TWO_CHAIN 6
-#define INCREASE_MAXPOW_BY_THREE_CHAIN 10
- struct ath_common *common = ath9k_hw_common(ah);
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
int16_t ratesArray[Ar5416RateSize];
- int16_t txPowerIndexOffset = 0;
+ int16_t txPowerIndexOffset = 0;
u8 ht40PowerIncForPdadc = 2;
int i;
@@ -852,13 +868,13 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
AR9287_EEP_MINOR_VER_2)
ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
- ath9k_hw_set_AR9287_power_per_rate_table(ah, chan,
+ ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
&ratesArray[0], cfgCtl,
twiceAntennaReduction,
twiceMaxRegulatoryPower,
powerLimit);
- ath9k_hw_set_AR9287_power_cal_table(ah, chan, &txPowerIndexOffset);
+ ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
@@ -871,6 +887,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
}
+ /* OFDM power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
ATH9K_POW_SM(ratesArray[rate18mb], 24)
| ATH9K_POW_SM(ratesArray[rate12mb], 16)
@@ -883,6 +900,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rate36mb], 8)
| ATH9K_POW_SM(ratesArray[rate24mb], 0));
+ /* CCK power per rate */
if (IS_CHAN_2GHZ(chan)) {
REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
ATH9K_POW_SM(ratesArray[rate2s], 24)
@@ -896,6 +914,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rate5_5l], 0));
}
+ /* HT20 power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
| ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
@@ -908,8 +927,9 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
| ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
+ /* HT40 power per rate */
if (IS_CHAN_HT40(chan)) {
- if (ath9k_hw_AR9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
+ if (ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
ATH9K_POW_SM(ratesArray[rateHt40_3], 24)
| ATH9K_POW_SM(ratesArray[rateHt40_2], 16)
@@ -943,6 +963,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
ht40PowerIncForPdadc, 0));
}
+ /* Dup/Ext power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
| ATH9K_POW_SM(ratesArray[rateExtCck], 16)
@@ -960,37 +981,20 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
else
regulatory->max_power_level = ratesArray[i];
-
- switch (ar5416_get_ntxchains(ah->txchainmask)) {
- case 1:
- break;
- case 2:
- regulatory->max_power_level +=
- INCREASE_MAXPOW_BY_TWO_CHAIN;
- break;
- case 3:
- regulatory->max_power_level +=
- INCREASE_MAXPOW_BY_THREE_CHAIN;
- break;
- default:
- ath_print(common, ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
- break;
- }
}
-static void ath9k_hw_AR9287_set_addac(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
struct ath9k_channel *chan)
{
}
-static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
+static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
u16 antWrites[AR9287_ANT_16S];
- u32 regChainOffset;
+ u32 regChainOffset, regval;
u8 txRxAttenLocal;
int i, j, offset_num;
@@ -1077,42 +1081,37 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB1,
- AR9287_AN_RF2G3_DB1_S, pModal->db1);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0, AR9287_AN_RF2G3_DB2,
- AR9287_AN_RF2G3_DB2_S, pModal->db2);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
- AR9287_AN_RF2G3_OB_CCK,
- AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
- AR9287_AN_RF2G3_OB_PSK,
- AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
- AR9287_AN_RF2G3_OB_QAM,
- AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH0,
- AR9287_AN_RF2G3_OB_PAL_OFF,
- AR9287_AN_RF2G3_OB_PAL_OFF_S,
- pModal->ob_pal_off);
-
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
- AR9287_AN_RF2G3_DB1, AR9287_AN_RF2G3_DB1_S,
- pModal->db1);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1, AR9287_AN_RF2G3_DB2,
- AR9287_AN_RF2G3_DB2_S, pModal->db2);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
- AR9287_AN_RF2G3_OB_CCK,
- AR9287_AN_RF2G3_OB_CCK_S, pModal->ob_cck);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
- AR9287_AN_RF2G3_OB_PSK,
- AR9287_AN_RF2G3_OB_PSK_S, pModal->ob_psk);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
- AR9287_AN_RF2G3_OB_QAM,
- AR9287_AN_RF2G3_OB_QAM_S, pModal->ob_qam);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_RF2G3_CH1,
- AR9287_AN_RF2G3_OB_PAL_OFF,
- AR9287_AN_RF2G3_OB_PAL_OFF_S,
- pModal->ob_pal_off);
+ regval = REG_READ(ah, AR9287_AN_RF2G3_CH0);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH0, regval);
+
+ regval = REG_READ(ah, AR9287_AN_RF2G3_CH1);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ ath9k_hw_analog_shift_regwrite(ah, AR9287_AN_RF2G3_CH1, regval);
REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
AR_PHY_TX_END_DATA_START, pModal->txFrameToDataStart);
@@ -1125,13 +1124,13 @@ static void ath9k_hw_AR9287_set_board_values(struct ath_hw *ah,
pModal->xpaBiasLvl);
}
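
The regval blocks above replace a dozen ath9k_hw_analog_shift_rmw() calls with one read/clear/set/write sequence per chain, using SM() to place each modal value into its register field. A standalone sketch of the SM() idiom; the FIELD_* mask and shift are made-up values, not the real AR9287_AN_RF2G3_* definitions.

#include <stdint.h>

#define FIELD_DB1	0x00000380u	/* hypothetical field mask */
#define FIELD_DB1_S	7		/* hypothetical field shift */

/* Same shape as the kernel's SM(): shift into place, mask to the field. */
#define SM_LOCAL(v, f)	((((uint32_t)(v)) << f##_S) & f)

static uint32_t program_db1(uint32_t regval, uint8_t db1)
{
	regval &= ~FIELD_DB1;			/* clear the field */
	regval |= SM_LOCAL(db1, FIELD_DB1);	/* insert the new value */
	return regval;
}
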
-static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah,
+static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
enum ieee80211_band freq_band)
{
return 1;
}
-static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
+static u16 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -1140,11 +1139,12 @@ static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
return pModal->antCtrlCommon & 0xFFFF;
}
-static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
+static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
{
#define EEP_MAP9287_SPURCHAN \
(ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
+
struct ath_common *common = ath9k_hw_common(ah);
u16 spur_val = AR_NO_SPUR;
@@ -1171,15 +1171,15 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
}
const struct eeprom_ops eep_ar9287_ops = {
- .check_eeprom = ath9k_hw_AR9287_check_eeprom,
- .get_eeprom = ath9k_hw_AR9287_get_eeprom,
- .fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
- .get_eeprom_ver = ath9k_hw_AR9287_get_eeprom_ver,
- .get_eeprom_rev = ath9k_hw_AR9287_get_eeprom_rev,
- .get_num_ant_config = ath9k_hw_AR9287_get_num_ant_config,
- .get_eeprom_antenna_cfg = ath9k_hw_AR9287_get_eeprom_antenna_cfg,
- .set_board_values = ath9k_hw_AR9287_set_board_values,
- .set_addac = ath9k_hw_AR9287_set_addac,
- .set_txpower = ath9k_hw_AR9287_set_txpower,
- .get_spur_channel = ath9k_hw_AR9287_get_spur_channel
+ .check_eeprom = ath9k_hw_ar9287_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9287_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
+ .get_num_ant_config = ath9k_hw_ar9287_get_num_ant_config,
+ .get_eeprom_antenna_cfg = ath9k_hw_ar9287_get_eeprom_antenna_cfg,
+ .set_board_values = ath9k_hw_ar9287_set_board_values,
+ .set_addac = ath9k_hw_ar9287_set_addac,
+ .set_txpower = ath9k_hw_ar9287_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9287_get_spur_channel
};
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 7e1ed78d0e64..77b1433312cc 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -617,6 +617,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
int16_t minDelta = 0;
struct chan_centers centers;
+ memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
ath9k_hw_get_channel_centers(ah, chan, &centers);
for (numPiers = 0; numPiers < availPiers; numPiers++) {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 77b359162d6c..5f3ea7091ae0 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -16,12 +16,10 @@
#include "htc.h"
-#define ATH9K_FW_USB_DEV(devid, fw) \
- { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
-
static struct usb_device_id ath9k_hif_usb_ids[] = {
- ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
- ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+ { USB_DEVICE(0x0cf3, 0x9271) },
+ { USB_DEVICE(0x0cf3, 0x1006) },
+ { USB_DEVICE(0x0cf3, 0x7010) },
{ },
};
@@ -756,6 +754,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
size_t len = hif_dev->firmware->size;
u32 addr = AR9271_FIRMWARE;
u8 *buf = kzalloc(4096, GFP_KERNEL);
+ u32 firm_offset;
if (!buf)
return -ENOMEM;
@@ -779,32 +778,37 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
}
kfree(buf);
+ if (hif_dev->device_id == 0x7010)
+ firm_offset = AR7010_FIRMWARE_TEXT;
+ else
+ firm_offset = AR9271_FIRMWARE_TEXT;
+
/*
* Issue FW download complete command to firmware.
*/
err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
FIRMWARE_DOWNLOAD_COMP,
0x40 | USB_DIR_OUT,
- AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+ firm_offset >> 8, 0, NULL, 0, HZ);
if (err)
return -EIO;
dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
- "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+ hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
return 0;
}
-static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
- const char *fw_name)
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
int ret;
/* Request firmware */
- ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+ ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
+ &hif_dev->udev->dev);
if (ret) {
dev_err(&hif_dev->udev->dev,
- "ath9k_htc: Firmware - %s not found\n", fw_name);
+ "ath9k_htc: Firmware - %s not found\n", hif_dev->fw_name);
goto err_fw_req;
}
@@ -820,7 +824,8 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
dev_err(&hif_dev->udev->dev,
- "ath9k_htc: Firmware - %s download failed\n", fw_name);
+ "ath9k_htc: Firmware - %s download failed\n",
+ hif_dev->fw_name);
goto err_fw_download;
}
@@ -847,7 +852,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
{
struct usb_device *udev = interface_to_usbdev(interface);
struct hif_device_usb *hif_dev;
- const char *fw_name = (const char *) id->driver_info;
int ret = 0;
hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
@@ -872,7 +876,29 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
goto err_htc_hw_alloc;
}
- ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+ /* Find out which firmware to load */
+
+ switch (hif_dev->device_id) {
+ case 0x9271:
+ case 0x1006:
+ hif_dev->fw_name = "ar9271.fw";
+ break;
+ case 0x7010:
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
+ hif_dev->fw_name = "ar7010_1_1.fw";
+ else
+ hif_dev->fw_name = "ar7010.fw";
+ break;
+ default:
+ break;
+ }
+
+ if (!hif_dev->fw_name) {
+ dev_err(&udev->dev, "Can't determine firmware!\n");
+ goto err_htc_hw_alloc;
+ }
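
The switch above picks the firmware image from the USB product id, with a bcdDevice check to tell AR7010 1.1 parts apart. The same mapping written as a table lookup, purely as a sketch; pick_fw() and struct fw_map are not driver code.

#include <stdint.h>
#include <stddef.h>

struct fw_map {
	uint16_t devid;
	const char *fw;
};

static const struct fw_map fw_table[] = {
	{ 0x9271, "ar9271.fw" },
	{ 0x1006, "ar9271.fw" },
	{ 0x7010, "ar7010.fw" },	/* bcdDevice 0x0202 -> ar7010_1_1.fw */
};

static const char *pick_fw(uint16_t devid, uint16_t bcd_device)
{
	size_t i;

	if (devid == 0x7010 && bcd_device == 0x0202)
		return "ar7010_1_1.fw";

	for (i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++)
		if (fw_table[i].devid == devid)
			return fw_table[i].fw;

	return NULL;	/* caller treats NULL as "can't determine firmware" */
}
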
+
+ ret = ath9k_hif_usb_dev_init(hif_dev);
if (ret) {
ret = -EINVAL;
goto err_hif_init_usb;
@@ -907,12 +933,10 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
void *buf;
int ret;
- buf = kmalloc(4, GFP_KERNEL);
+ buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL);
if (!buf)
return;
- memcpy(buf, &reboot_cmd, 4);
-
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
if (ret)
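
kmemdup() used above is the kernel's allocate-and-copy helper, so the open-coded kmalloc() + memcpy() pair can go. A userspace sketch of the same helper; memdup() here is local, not a kernel symbol.

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}
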
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 0aca49b6fcb6..2daf97b11c08 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -19,6 +19,7 @@
#define AR9271_FIRMWARE 0x501000
#define AR9271_FIRMWARE_TEXT 0x903000
+#define AR7010_FIRMWARE_TEXT 0x906000
#define FIRMWARE_DOWNLOAD 0x30
#define FIRMWARE_DOWNLOAD_COMP 0x31
@@ -90,6 +91,7 @@ struct hif_device_usb {
struct usb_anchor regout_submitted;
struct usb_anchor rx_submitted;
struct sk_buff *remain_skb;
+ const char *fw_name;
int rx_remain_len;
int rx_pkt_len;
int rx_transfer_len;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index c251603ab032..58f52a1dc7ea 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -223,15 +223,6 @@ struct ath9k_htc_sta {
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
};
-struct ath9k_htc_aggr_work {
- u16 tid;
- u8 sta_addr[ETH_ALEN];
- struct ieee80211_hw *hw;
- struct ieee80211_vif *vif;
- enum ieee80211_ampdu_mlme_action action;
- struct mutex mutex;
-};
-
#define ATH9K_HTC_RXBUF 256
#define HTC_RX_FRAME_HEADER_SIZE 40
@@ -257,12 +248,15 @@ struct ath9k_htc_tx_ctl {
#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
+
struct ath_tx_stats {
u32 buf_queued;
u32 buf_completed;
u32 skb_queued;
u32 skb_completed;
u32 skb_dropped;
+ u32 queue_stats[WME_NUM_AC];
};
struct ath_rx_stats {
@@ -286,6 +280,8 @@ struct ath9k_debug {
#define TX_STAT_INC(c) do { } while (0)
#define RX_STAT_INC(c) do { } while (0)
+#define TX_QSTAT_INC(c) do { } while (0)
+
#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
#define ATH_LED_PIN_DEF 1
@@ -326,11 +322,10 @@ struct htc_beacon_config {
#define OP_LED_ON BIT(4)
#define OP_PREAMBLE_SHORT BIT(5)
#define OP_PROTECT_ENABLE BIT(6)
-#define OP_TXAGGR BIT(7)
-#define OP_ASSOCIATED BIT(8)
-#define OP_ENABLE_BEACON BIT(9)
-#define OP_LED_DEINIT BIT(10)
-#define OP_UNPLUGGED BIT(11)
+#define OP_ASSOCIATED BIT(7)
+#define OP_ENABLE_BEACON BIT(8)
+#define OP_LED_DEINIT BIT(9)
+#define OP_UNPLUGGED BIT(10)
struct ath9k_htc_priv {
struct device *dev;
@@ -371,8 +366,6 @@ struct ath9k_htc_priv {
struct ath9k_htc_rx rx;
struct tasklet_struct tx_tasklet;
struct sk_buff_head tx_queue;
- struct ath9k_htc_aggr_work aggr_work;
- struct delayed_work ath9k_aggr_work;
struct delayed_work ath9k_ani_work;
struct work_struct ps_work;
@@ -390,13 +383,14 @@ struct ath9k_htc_priv {
int led_off_duration;
int led_on_cnt;
int led_off_cnt;
- int hwq_map[ATH9K_WME_AC_VO+1];
+
+ int beaconq;
+ int cabq;
+ int hwq_map[WME_NUM_AC];
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
struct ath9k_debug debug;
#endif
- struct ath9k_htc_target_rate tgt_rate;
-
struct mutex mutex;
};
@@ -405,6 +399,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif);
void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
@@ -424,8 +419,8 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv);
void ath9k_tx_tasklet(unsigned long data);
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
- enum ath9k_tx_queue_subtype qtype);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
int get_hw_qnum(u16 queue, int *hwq_map);
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
struct ath9k_tx_queue_info *qinfo);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index c10c7d002eb7..bd1506e69105 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -222,6 +222,29 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
spin_unlock_bh(&priv->beacon_lock);
}
+/* Currently, only for IBSS */
+void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath9k_tx_queue_info qi, qi_be;
+ int qnum = priv->hwq_map[WME_AC_BE];
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+ memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+
+ qi.tqi_aifs = qi_be.tqi_aifs;
+ qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+ qi.tqi_cwmax = qi_be.tqi_cwmax;
+
+ if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Unable to update beacon queue %u!\n", qnum);
+ } else {
+ ath9k_hw_resettxqueue(ah, priv->beaconq);
+ }
+}
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index dc015077a8d9..a63ae88abf3e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,13 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
.max_power = 20, \
}
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
static struct ieee80211_channel ath9k_2ghz_channels[] = {
CHAN2G(2412, 0), /* Channel 1 */
CHAN2G(2417, 1), /* Channel 2 */
@@ -51,6 +58,37 @@ static struct ieee80211_channel ath9k_2ghz_channels[] = {
CHAN2G(2484, 13), /* Channel 14 */
};
+static struct ieee80211_channel ath9k_5ghz_channels[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
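
The 5 GHz center frequencies in the table above follow the usual 5000 + 5 * channel rule, which is where the /* Channel NN */ annotations come from. A quick standalone check:

#include <stdio.h>

static int freq_to_chan_5ghz(int freq_mhz)
{
	return (freq_mhz - 5000) / 5;
}

int main(void)
{
	printf("%d\n", freq_to_chan_5ghz(5180));	/* 36 */
	printf("%d\n", freq_to_chan_5ghz(5825));	/* 165 */
	return 0;
}
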
/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
@@ -141,7 +179,7 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
return htc_connect_service(priv->htc, &req, ep_id);
}
-static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
{
int ret;
@@ -199,10 +237,33 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
if (ret)
goto err;
+ /*
+ * Setup required credits before initializing HTC.
+ * This is a bit hacky, but since queuing is done in
+ * the HIF layer, it shouldn't matter much.
+ */
+
+ switch (devid) {
+ case 0x9271:
+ case 0x1006:
+ priv->htc->credits = 33;
+ break;
+ case 0x7010:
+ priv->htc->credits = 45;
+ break;
+ default:
+ dev_err(priv->dev, "ath9k_htc: Unsupported device id: 0x%x\n",
+ devid);
+ goto err;
+ }
+
ret = htc_init(priv->htc);
if (ret)
goto err;
+ dev_info(priv->dev, "ath9k_htc: HTC initialized with %d credits\n",
+ priv->htc->credits);
+
return 0;
err:
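
The credit values programmed above (33 for AR9271-class devices, 45 for AR7010) read as a per-device buffer budget keyed on the device id, presumably the TX credit pool the target firmware exposes. The same mapping as a helper, illustrative only; htc_credits_for() is not a driver function.

#include <stdint.h>

static int htc_credits_for(uint16_t devid)
{
	switch (devid) {
	case 0x9271:
	case 0x1006:
		return 33;
	case 0x7010:
		return 45;
	default:
		return -1;	/* unsupported device */
	}
}
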
@@ -398,17 +459,43 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
static void setup_ht_cap(struct ath9k_htc_priv *priv,
struct ieee80211_sta_ht_cap *ht_info)
{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ u8 tx_streams, rx_streams;
+ int i;
+
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_SM_PS |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- ht_info->mcs.rx_mask[0] = 0xff;
+
+ /* ath9k_htc supports only 1 or 2 stream devices */
+ tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
+ rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+
+ if (tx_streams != rx_streams) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
+
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
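
ath9k_cmn_count_streams() above effectively counts the set bits of the chainmask, capped at what the target supports (2 for these devices); the result drives how many MCS rx_mask bytes get filled and whether TX/RX stream counts differ. A standalone sketch of that counting; count_streams() is a local stand-in, not the common-module helper.

#include <stdint.h>

static uint8_t count_streams(uint8_t chainmask, uint8_t max_streams)
{
	uint8_t streams = 0;

	while (chainmask) {
		streams += chainmask & 1;	/* one stream per chain bit */
		chainmask >>= 1;
	}

	return streams > max_streams ? max_streams : streams;
}
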
@@ -420,23 +507,37 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
priv->hwq_map[i] = -1;
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
+ if (priv->beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup BEACON xmit queue\n");
+ goto err;
+ }
+
+ priv->cabq = ath9k_htc_cabq_setup(priv);
+ if (priv->cabq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
@@ -468,36 +569,6 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
*/
for (i = 0; i < common->keymax; i++)
ath9k_hw_keyreset(priv->ah, (u16) i);
-
- if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
- 1, 1, NULL);
}
static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
@@ -512,6 +583,17 @@ static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates);
}
+
+ if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) {
+ priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
+ priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_channels);
+ priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
}
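
The 5 GHz band above points at ath9k_legacy_rates + 4 with four fewer entries because the first four legacy entries are the CCK rates (1/2/5.5/11 Mb/s), which exist only at 2.4 GHz; the remaining OFDM rates are shared by both bands. A sketch with a made-up miniature table; legacy_rates_kbps is not the driver's array.

#include <stdio.h>

static const int legacy_rates_kbps[] = {
	1000, 2000, 5500, 11000,		/* CCK, 2.4 GHz only */
	6000, 9000, 12000, 18000,		/* OFDM, shared */
	24000, 36000, 48000, 54000,
};

int main(void)
{
	const int *ofdm = legacy_rates_kbps + 4;	/* the 5 GHz view */
	int n = sizeof(legacy_rates_kbps) / sizeof(legacy_rates_kbps[0]) - 4;

	for (int i = 0; i < n; i++)
		printf("%d kb/s\n", ofdm[i]);
	return 0;
}
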
static void ath9k_init_misc(struct ath9k_htc_priv *priv)
@@ -524,7 +606,6 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
- priv->op_flags |= OP_TXAGGR;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
@@ -556,14 +637,12 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx_lock);
mutex_init(&priv->mutex);
- mutex_init(&priv->aggr_work.mutex);
mutex_init(&priv->htc_pm_lock);
tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
(unsigned long)priv);
tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
(unsigned long)priv);
tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
- INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
@@ -643,11 +722,17 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&priv->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->sbands[IEEE80211_BAND_5GHZ];
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
setup_ht_cap(priv,
&priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes))
+ setup_ht_cap(priv,
+ &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}
SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
@@ -747,7 +832,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
goto err_free;
}
- ret = ath9k_init_htc_services(priv);
+ ret = ath9k_init_htc_services(priv, devid);
if (ret)
goto err_init;
@@ -790,7 +875,8 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
if (ret)
return ret;
- ret = ath9k_init_htc_services(htc_handle->drv_priv);
+ ret = ath9k_init_htc_services(htc_handle->drv_priv,
+ htc_handle->drv_priv->ah->hw_version.devid);
return ret;
}
#endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9d371c18eb41..05445d8a9818 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -27,13 +27,11 @@ static struct dentry *ath9k_debugfs_root;
static void ath_update_txpow(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
- u32 txpow;
if (priv->curtxpow != priv->txpowlimit) {
ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
/* read back in case value is clamped */
- ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
- priv->curtxpow = txpow;
+ priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
}
}
@@ -325,142 +323,128 @@ static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
tcap.flags_ext = 0x80601000;
tcap.ampdu_limit = 0xffff0000;
tcap.ampdu_subframes = 20;
- tcap.tx_chainmask_legacy = 1;
+ tcap.tx_chainmask_legacy = priv->ah->caps.tx_chainmask;
tcap.protmode = 1;
- tcap.tx_chainmask = 1;
+ tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
return ret;
}
-static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
+ struct ath9k_htc_target_rate *trate)
{
- struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
struct ieee80211_supported_band *sband;
- struct ath9k_htc_target_rate trate;
u32 caps = 0;
- u8 cmd_rsp;
- int i, j, ret;
-
- memset(&trate, 0, sizeof(trate));
+ int i, j;
- /* Only 2GHz is supported */
- sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
for (i = 0, j = 0; i < sband->n_bitrates; i++) {
if (sta->supp_rates[sband->band] & BIT(i)) {
- priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+ trate->rates.legacy_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
}
}
- priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+ trate->rates.legacy_rates.rs_nrates = j;
if (sta->ht_cap.ht_supported) {
for (i = 0, j = 0; i < 77; i++) {
if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
- priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+ trate->rates.ht_rates.rs_rates[j++] = i;
if (j == ATH_HTC_RATE_MAX)
break;
}
- priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+ trate->rates.ht_rates.rs_nrates = j;
caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
caps |= WLAN_RC_40_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (conf_is_ht40(&priv->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40))
+ caps |= WLAN_RC_SGI_FLAG;
+ else if (conf_is_ht20(&priv->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20))
caps |= WLAN_RC_SGI_FLAG;
-
}
- priv->tgt_rate.sta_index = ista->index;
- priv->tgt_rate.isnew = 1;
- trate = priv->tgt_rate;
- priv->tgt_rate.capflags = cpu_to_be32(caps);
- trate.capflags = cpu_to_be32(caps);
+ trate->sta_index = ista->index;
+ trate->isnew = 1;
+ trate->capflags = cpu_to_be32(caps);
+}
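
Both loops in ath9k_htc_setup_rate() do the same thing: walk a supported-rate bitmap and collect the indices of the set bits into a compact target-rate list. A standalone sketch of that pattern; collect_rates() is illustrative, not driver code.

#include <stdint.h>
#include <stddef.h>

static size_t collect_rates(uint32_t supp_bitmap, uint8_t *out, size_t max)
{
	size_t j = 0;

	for (unsigned i = 0; i < 32 && j < max; i++)
		if (supp_bitmap & (1u << i))
			out[j++] = (uint8_t)i;	/* index of a supported rate */

	return j;
}
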
+
+static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_target_rate *trate)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u8 cmd_rsp;
- WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
if (ret) {
ath_print(common, ATH_DBG_FATAL,
"Unable to initialize Rate information on target\n");
- return ret;
}
- ath_print(common, ATH_DBG_CONFIG,
- "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
- return 0;
+ return ret;
}
-static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
-
- if (!conf_is_ht(conf))
- return false;
-
- if (!(priv->op_flags & OP_ASSOCIATED) ||
- (priv->op_flags & OP_SCANNING))
- return false;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+ int ret;
- if (conf_is_ht40(conf)) {
- if (priv->ah->curchan->chanmode &
- (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
- return false;
- } else {
- *cw40 = true;
- return true;
- }
- } else { /* ht20 */
- if (priv->ah->curchan->chanmode & CHANNEL_HT20)
- return false;
- else
- return true;
- }
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ ret = ath9k_htc_send_rate_cmd(priv, &trate);
+ if (!ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
}
-static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
+static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
{
- struct ath9k_htc_target_rate trate;
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+ struct ieee80211_sta *sta;
int ret;
- u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
- u8 cmd_rsp;
-
- memset(&trate, 0, sizeof(trate));
-
- trate = priv->tgt_rate;
- if (is_cw40)
- caps |= WLAN_RC_40_FLAG;
- else
- caps &= ~WLAN_RC_40_FLAG;
-
- priv->tgt_rate.capflags = cpu_to_be32(caps);
- trate.capflags = cpu_to_be32(caps);
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to update Rate information on target\n");
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
return;
}
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ rcu_read_unlock();
- ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
- "caps:0x%x on target\n", priv->tgt_rate.capflags);
+ ret = ath9k_htc_send_rate_cmd(priv, &trate);
+ if (!ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ bss_conf->bssid, be32_to_cpu(trate.capflags));
}
-static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
- struct ieee80211_vif *vif,
- u8 *sta_addr, u8 tid, bool oper)
+int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_ampdu_mlme_action action, u16 tid)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_target_aggr aggr;
- struct ieee80211_sta *sta = NULL;
struct ath9k_htc_sta *ista;
int ret = 0;
u8 cmd_rsp;
@@ -469,72 +453,28 @@ static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
return -EINVAL;
memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
-
- rcu_read_lock();
-
- /* Check if we are able to retrieve the station */
- sta = ieee80211_find_sta(vif, sta_addr);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (oper)
- ista->tid_state[tid] = AGGR_START;
- else
- ista->tid_state[tid] = AGGR_STOP;
-
aggr.sta_index = ista->index;
-
- rcu_read_unlock();
-
- aggr.tidno = tid;
- aggr.aggr_enable = oper;
+ aggr.tidno = tid & 0xf;
+ aggr.aggr_enable = (action == IEEE80211_AMPDU_TX_START) ? true : false;
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
ath_print(common, ATH_DBG_CONFIG,
"Unable to %s TX aggregation for (%pM, %d)\n",
- (oper) ? "start" : "stop", sta->addr, tid);
+ (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
ath_print(common, ATH_DBG_CONFIG,
- "%s aggregation for (%pM, %d)\n",
- (oper) ? "Starting" : "Stopping", sta->addr, tid);
-
- return ret;
-}
+ "%s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "Starting" : "Stopping",
+ sta->addr, tid);
-void ath9k_htc_aggr_work(struct work_struct *work)
-{
- int ret = 0;
- struct ath9k_htc_priv *priv =
- container_of(work, struct ath9k_htc_priv,
- ath9k_aggr_work.work);
- struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
-
- mutex_lock(&wk->mutex);
-
- switch (wk->action) {
- case IEEE80211_AMPDU_TX_START:
- ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
- wk->tid, true);
- if (!ret)
- ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
- wk->tid);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
- wk->tid, false);
- ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
- break;
- default:
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
- "Unknown AMPDU action\n");
- }
+ spin_lock_bh(&priv->tx_lock);
+ ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
+ spin_unlock_bh(&priv->tx_lock);
- mutex_unlock(&wk->mutex);
+ return ret;
}
/*********/
@@ -617,6 +557,19 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
"%20s : %10u\n", "SKBs dropped",
priv->debug.tx_stats.skb_dropped);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BE queued",
+ priv->debug.tx_stats.queue_stats[WME_AC_BE]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "BK queued",
+ priv->debug.tx_stats.queue_stats[WME_AC_BK]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VI queued",
+ priv->debug.tx_stats.queue_stats[WME_AC_VI]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "VO queued",
+ priv->debug.tx_stats.queue_stats[WME_AC_VO]);
+
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1054,6 +1007,95 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
wiphy_rfkill_start_polling(priv->hw->wiphy);
}
+static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret;
+ u8 cmd_rsp;
+
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset the HW */
+ ret = ath9k_hw_reset(ah, ah->curchan, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, ah->curchan->channel);
+ }
+
+ ath_update_txpow(priv);
+
+ /* Start RX */
+ WMI_CMD(WMI_START_RECV_CMDID);
+ ath9k_host_rx_init(priv);
+
+ /* Start TX */
+ htc_start(priv->htc);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ieee80211_wake_queues(hw);
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret;
+ u8 cmd_rsp;
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+
+ /* Stop TX */
+ ieee80211_stop_queues(hw);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Stop RX */
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ /*
+ * The MIB counters have to be disabled here,
+ * since the target doesn't do it.
+ */
+ ath9k_hw_disable_mib_counters(ah);
+
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset the HW */
+ ret = ath9k_hw_reset(ah, ah->curchan, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, ah->curchan->channel);
+ }
+
+ /* Disable the PHY */
+ ath9k_hw_phy_disable(ah);
+
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+}
+
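A note on the WMI_CMD()/WMI_CMD_BUF() invocations in the two helpers above: these are driver macros that expand to a synchronous firmware command exchange and assign the result into the caller's local ret (using the local cmd_rsp as the response buffer), which is why those locals are declared but never referenced directly. A hypothetical sketch of the convention in kernel-style C; the helper name wmi_send_sync and the exact expansion are assumptions, not the driver's definition:

/* Hypothetical sketch only -- shows the implicit-locals convention,
 * not the real macro body. 'wmi_send_sync' is a made-up helper. */
#define WMI_CMD_SKETCH(cmd_id)						\
	do {								\
		ret = wmi_send_sync(priv, (cmd_id),			\
				    (u8 *)&cmd_rsp, sizeof(cmd_rsp));	\
	} while (0)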
/**********************/
/* mac80211 Callbacks */
/**********************/
@@ -1099,7 +1141,7 @@ fail_tx:
return 0;
}
-static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+static int ath9k_htc_start(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
@@ -1111,10 +1153,16 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
__be16 htc_mode;
u8 cmd_rsp;
+ mutex_lock(&priv->mutex);
+
ath_print(common, ATH_DBG_CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
+ /* Ensure that HW is awake before flushing RX */
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ WMI_CMD(WMI_FLUSH_RECV_CMDID);
+
/* setup initial channel */
init_channel = ath9k_cmn_get_curchannel(hw, ah);
@@ -1127,6 +1175,7 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d "
"(freq %u MHz)\n", ret, curchan->center_freq);
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -1147,31 +1196,14 @@ static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
priv->tx_queues_stop = false;
spin_unlock_bh(&priv->tx_lock);
- if (led) {
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
- }
-
ieee80211_wake_queues(hw);
- return ret;
-}
-
-static int ath9k_htc_start(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- int ret = 0;
-
- mutex_lock(&priv->mutex);
- ret = ath9k_htc_radio_enable(hw, false);
mutex_unlock(&priv->mutex);
return ret;
}
-static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
@@ -1179,21 +1211,17 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
int ret = 0;
u8 cmd_rsp;
+ mutex_lock(&priv->mutex);
+
if (priv->op_flags & OP_INVALID) {
ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ mutex_unlock(&priv->mutex);
return;
}
- if (led) {
- /* Disable LED */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
-
/* Cancel all the running timers/work .. */
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
- cancel_delayed_work_sync(&priv->ath9k_aggr_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
ath9k_led_stop_brightness(priv);
@@ -1202,12 +1230,6 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
- ath9k_hw_phy_disable(ah);
- ath9k_hw_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1, 1);
- ath9k_htc_ps_restore(priv);
- ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
-
skb_queue_purge(&priv->tx_queue);
/* Remove monitor interface here */
@@ -1220,21 +1242,18 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
"Monitor interface removed\n");
}
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
priv->op_flags |= OP_INVALID;
ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
-}
-
-static void ath9k_htc_stop(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_radio_disable(hw, false);
mutex_unlock(&priv->mutex);
}
-
static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1302,6 +1321,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
out:
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
+
return ret;
}
@@ -1318,6 +1338,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1328,6 +1349,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_remove_station(priv, vif, NULL);
priv->vif = NULL;
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1343,30 +1365,27 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
bool enable_radio = false;
bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ mutex_lock(&priv->htc_pm_lock);
if (!idle && priv->ps_idle)
enable_radio = true;
-
priv->ps_idle = idle;
+ mutex_unlock(&priv->htc_pm_lock);
if (enable_radio) {
- ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
- ath9k_htc_radio_enable(hw, true);
ath_print(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ ath9k_htc_radio_enable(hw);
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
- bool is_cw40 = false;
ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- if (check_rc_update(hw, &is_cw40))
- ath9k_htc_rc_update(priv, is_cw40);
-
ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
@@ -1399,14 +1418,21 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (priv->ps_idle) {
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ mutex_lock(&priv->htc_pm_lock);
+ if (!priv->ps_idle) {
+ mutex_unlock(&priv->htc_pm_lock);
+ goto out;
+ }
+ mutex_unlock(&priv->htc_pm_lock);
+
ath_print(common, ATH_DBG_CONFIG,
"idle: disabling radio\n");
- ath9k_htc_radio_disable(hw, true);
+ ath9k_htc_radio_disable(hw);
}
+out:
mutex_unlock(&priv->mutex);
-
return 0;
}
@@ -1428,8 +1454,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
u32 rfilt;
mutex_lock(&priv->mutex);
-
ath9k_htc_ps_wakeup(priv);
+
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
@@ -1444,30 +1470,38 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
-static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
int ret;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret)
+ ath9k_htc_init_rate(priv, sta);
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
- switch (cmd) {
- case STA_NOTIFY_ADD:
- ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
- ath9k_htc_init_rate(priv, vif, sta);
- break;
- case STA_NOTIFY_REMOVE:
- ath9k_htc_remove_station(priv, vif, sta);
- break;
- default:
- break;
- }
+ return ret;
+}
+
+static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret;
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+ ret = ath9k_htc_remove_station(priv, vif, sta);
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -1482,6 +1516,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1499,9 +1534,16 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
params->cw_max, params->txop);
ret = ath_htc_txq_update(priv, qnum, &qi);
- if (ret)
+ if (ret) {
ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+ goto out;
+ }
+ if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
+ (qnum == priv->hwq_map[WME_AC_BE]))
+ ath9k_htc_beaconq_config(priv);
+out:
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
return ret;
@@ -1574,7 +1616,6 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_start_ani(priv);
} else {
priv->op_flags &= ~OP_ASSOCIATED;
- cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
}
}
@@ -1631,6 +1672,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath9k_hw_init_global_settings(ah);
}
+ if (changed & BSS_CHANGED_HT)
+ ath9k_htc_update_rate(priv, vif, bss_conf);
+
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1641,7 +1685,9 @@ static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
u64 tsf;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
tsf = ath9k_hw_gettsf64(priv->ah);
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
return tsf;
@@ -1652,7 +1698,9 @@ static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
struct ath9k_htc_priv *priv = hw->priv;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
ath9k_hw_settsf64(priv->ah, tsf);
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1660,11 +1708,11 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
- ath9k_htc_ps_wakeup(priv);
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
ath9k_hw_reset_tsf(priv->ah);
- mutex_unlock(&priv->mutex);
ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
}
static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
@@ -1674,8 +1722,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
u16 tid, u16 *ssn)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_aggr_work *work = &priv->aggr_work;
struct ath9k_htc_sta *ista;
+ int ret = 0;
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -1683,26 +1731,26 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ if (!ret)
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
case IEEE80211_AMPDU_TX_STOP:
- if (!(priv->op_flags & OP_TXAGGR))
- return -ENOTSUPP;
- memcpy(work->sta_addr, sta->addr, ETH_ALEN);
- work->hw = hw;
- work->vif = vif;
- work->action = action;
- work->tid = tid;
- ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = AGGR_OPERATIONAL;
+ spin_unlock_bh(&priv->tx_lock);
break;
default:
ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
"Unknown AMPDU action\n");
}
- return 0;
+ return ret;
}
static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
@@ -1722,8 +1770,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath9k_htc_priv *priv = hw->priv;
- ath9k_htc_ps_wakeup(priv);
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
spin_lock_bh(&priv->beacon_lock);
priv->op_flags &= ~OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
@@ -1731,8 +1779,8 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
if (priv->op_flags & OP_ASSOCIATED)
ath9k_htc_beacon_config(priv, priv->vif);
ath_start_ani(priv);
- mutex_unlock(&priv->mutex);
ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
}
static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
@@ -1746,8 +1794,10 @@ static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
priv->ah->coverage_class = coverage_class;
ath9k_hw_init_global_settings(priv->ah);
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1759,7 +1809,8 @@ struct ieee80211_ops ath9k_htc_ops = {
.remove_interface = ath9k_htc_remove_interface,
.config = ath9k_htc_config,
.configure_filter = ath9k_htc_configure_filter,
- .sta_notify = ath9k_htc_sta_notify,
+ .sta_add = ath9k_htc_sta_add,
+ .sta_remove = ath9k_htc_sta_remove,
.conf_tx = ath9k_htc_conf_tx,
.bss_info_changed = ath9k_htc_bss_info_changed,
.set_key = ath9k_htc_set_key,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2571b443ac82..bd0b4acc3ece 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,19 +20,29 @@
/* TX */
/******/
+#define ATH9K_HTC_INIT_TXQ(subtype) do { \
+ qi.tqi_subtype = subtype; \
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
+ qi.tqi_physCompBuf = 0; \
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | \
+ TXQ_FLAG_TXDESCINT_ENABLE; \
+ } while (0)
+
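The do { ... } while (0) wrapper used by ATH9K_HTC_INIT_TXQ above is the standard C idiom for making a multi-statement macro behave as a single statement. A minimal standalone illustration (the macro and function names below are made up for the example):

/* The wrapper makes the expansion a single statement, so the macro can
 * sit in an unbraced if/else without breaking the else branch. */
#define INIT_PAIR(a, b) do { (a) = 1; (b) = 2; } while (0)

static void init_pair_example(int cond, int *x, int *y)
{
	if (cond)
		INIT_PAIR(*x, *y);	/* expands safely as one statement */
	else
		*x = 0;
}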
int get_hw_qnum(u16 queue, int *hwq_map)
{
switch (queue) {
case 0:
- return hwq_map[ATH9K_WME_AC_VO];
+ return hwq_map[WME_AC_VO];
case 1:
- return hwq_map[ATH9K_WME_AC_VI];
+ return hwq_map[WME_AC_VI];
case 2:
- return hwq_map[ATH9K_WME_AC_BE];
+ return hwq_map[WME_AC_BE];
case 3:
- return hwq_map[ATH9K_WME_AC_BK];
+ return hwq_map[WME_AC_BK];
default:
- return hwq_map[ATH9K_WME_AC_BE];
+ return hwq_map[WME_AC_BE];
}
}
@@ -71,7 +81,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
struct ath9k_htc_vif *avp;
struct ath9k_htc_tx_ctl tx_ctl;
enum htc_endpoint_id epid;
- u16 qnum, hw_qnum;
+ u16 qnum;
__le16 fc;
u8 *tx_fhdr;
u8 sta_idx;
@@ -131,20 +141,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
qnum = skb_get_queue_mapping(skb);
- hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
- switch (hw_qnum) {
+ switch (qnum) {
case 0:
- epid = priv->data_be_ep;
+ TX_QSTAT_INC(WME_AC_VO);
+ epid = priv->data_vo_ep;
break;
- case 2:
+ case 1:
+ TX_QSTAT_INC(WME_AC_VI);
epid = priv->data_vi_ep;
break;
- case 3:
- epid = priv->data_vo_ep;
+ case 2:
+ TX_QSTAT_INC(WME_AC_BE);
+ epid = priv->data_be_ep;
break;
- case 1:
+ case 3:
default:
+ TX_QSTAT_INC(WME_AC_BK);
epid = priv->data_bk_ep;
break;
}
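For reference, the switch above relies on mac80211's fixed mapping of skb queue numbers to WMM access categories (0 = VO, 1 = VI, 2 = BE, 3 = BK), highest priority first. A minimal table-driven sketch of the same mapping, assuming the driver's WME_AC_* constants; the function name is illustrative only:

/* Illustrative only: skb_get_queue_mapping() values follow mac80211's
 * queue numbering, so a lookup table is equivalent to the switch. */
static int qnum_to_ac(u16 qnum)
{
	static const int queue_to_ac[4] = {
		WME_AC_VO,	/* queue 0 */
		WME_AC_VI,	/* queue 1 */
		WME_AC_BE,	/* queue 2 */
		WME_AC_BK,	/* queue 3 */
	};

	return (qnum < 4) ? queue_to_ac[qnum] : WME_AC_BK;
}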
@@ -174,6 +187,19 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
return htc_send(priv->htc, skb, epid, &tx_ctl);
}
+static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_sta *ista, u8 tid)
+{
+ bool ret = false;
+
+ spin_lock_bh(&priv->tx_lock);
+ if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
+ ret = true;
+ spin_unlock_bh(&priv->tx_lock);
+
+ return ret;
+}
+
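The tx_lock taken above exists because tid_state is also read and modified from the TX tasklet, which runs in softirq context; process-context paths therefore use the _bh lock variant so the tasklet cannot preempt the lock holder on the same CPU. The same pattern appears earlier in this patch, in the ampdu_action handler:

	/* Update shared per-TID state under the BH-disabling spinlock. */
	spin_lock_bh(&priv->tx_lock);
	ista->tid_state[tid] = AGGR_OPERATIONAL;
	spin_unlock_bh(&priv->tx_lock);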
void ath9k_tx_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
@@ -203,8 +229,7 @@ void ath9k_tx_tasklet(unsigned long data)
/* Check if we need to start aggregation */
if (sta && conf_is_ht(&priv->hw->conf) &&
- (priv->op_flags & OP_TXAGGR)
- && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 *qc, tid;
struct ath9k_htc_sta *ista;
@@ -213,10 +238,11 @@ void ath9k_tx_tasklet(unsigned long data)
tid = qc[0] & 0xf;
ista = (struct ath9k_htc_sta *)sta->drv_priv;
- if ((tid < ATH9K_HTC_MAX_TID) &&
- ista->tid_state[tid] == AGGR_STOP) {
+ if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
ieee80211_start_tx_ba_session(sta, tid);
+ spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = AGGR_PROGRESS;
+ spin_unlock_bh(&priv->tx_lock);
}
}
}
@@ -284,8 +310,7 @@ void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
}
-bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
- enum ath9k_tx_queue_subtype subtype)
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
{
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -293,13 +318,7 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
int qnum;
memset(&qi, 0, sizeof(qi));
-
- qi.tqi_subtype = subtype;
- qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
- qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
- qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
- qi.tqi_physCompBuf = 0;
- qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+ ATH9K_HTC_INIT_TXQ(subtype);
qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
if (qnum == -1)
@@ -317,6 +336,16 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
return true;
}
+int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_tx_queue_info qi;
+
+ memset(&qi, 0, sizeof(qi));
+ ATH9K_HTC_INIT_TXQ(0);
+
+ return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
+}
+
/******/
/* RX */
/******/
@@ -387,9 +416,6 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
/* configure operational mode */
ath9k_hw_setopmode(ah);
- /* Handle any link-level address change. */
- ath9k_hw_setmac(ah, common->macaddr);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -399,7 +425,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah);
+ ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 064397fd738e..705c0f342e1c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -89,7 +89,6 @@ static void htc_process_target_rdy(struct htc_target *target,
struct htc_endpoint *endpoint;
struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
- target->credits = be16_to_cpu(htc_ready_msg->credits);
target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
endpoint = &target->endpoint[ENDPOINT0];
@@ -159,7 +158,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
cp_msg->pipe_id = USB_WLAN_TX_PIPE;
- cp_msg->credits = 28;
+ cp_msg->credits = target->credits;
target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 624422a8169e..381da6c93b14 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -128,6 +128,17 @@ static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
}
+static inline void ath9k_hw_procmibevent(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->ani_proc_mib_event(ah);
+}
+
+static inline void ath9k_hw_ani_monitor(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_ops(ah)->ani_monitor(ah, chan);
+}
+
/* Private hardware call ops */
/* PHY ops */
@@ -277,4 +288,9 @@ static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
}
+static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
+{
+ ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning);
+}
+
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c33f17dbe6f1..62597f4ca319 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,11 +23,6 @@
#include "rc.h"
#include "ar9003_mac.h"
-#define ATH9K_CLOCK_RATE_CCK 22
-#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
-#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
-#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
-
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
MODULE_AUTHOR("Atheros Communications");
@@ -80,6 +75,15 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
}
+static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
+{
+ /* You will not have this callback if using the old ANI */
+ if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -371,13 +375,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
-
- /*
- * For now ANI is disabled for AR9003, it is still
- * being tested.
- */
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ah->config.enable_ani = 1;
+ ah->config.enable_ani = true;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -392,12 +390,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rx_intr_mitigation = true;
/*
- * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
- * used by AR9003, but it is showing reliability issues.
- * It will take a while to fix so this is currently disabled.
- */
-
- /*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
@@ -433,7 +425,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->ah_flags = AH_USE_EEPROM;
ah->atim_window = 0;
- ah->sta_id1_defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
+ ah->sta_id1_defaults =
+ AR_STA_ID1_CRPT_MIC_ENABLE |
+ AR_STA_ID1_MCAST_KSRCH;
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
@@ -571,6 +565,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ah->ani_function = ATH9K_ANI_ALL;
if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
ath9k_hw_init_mode_regs(ah);
@@ -627,6 +623,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ar9003_hw_set_nf_limits(ah);
ath9k_init_nfcal_hist_buffer(ah);
+ ah->bb_watchdog_timeout_ms = 25;
common->state = ATH_HW_INITIALIZED;
@@ -1303,6 +1300,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_enable_async_fifo(ah);
+
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
@@ -1367,6 +1367,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_resettxqueue(ah, i);
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
+ ath9k_hw_ani_cache_ini_regs(ah);
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
@@ -1375,7 +1376,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
if (!AR_SREV_9300_20_OR_LATER(ah)) {
- ar9002_hw_enable_async_fifo(ah);
+ ar9002_hw_update_async_fifo(ah);
ar9002_hw_enable_wep_aggregation(ah);
}
@@ -1426,9 +1427,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
}
} else {
- /* Configure AR9271 target WLAN */
- if (AR_SREV_9271(ah))
- REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+ if (common->bus_ops->ath_bus_type == ATH_USB) {
+ /* Configure AR9271 target WLAN */
+ if (AR_SREV_9271(ah))
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+ else
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+ }
#ifdef __BIG_ENDIAN
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1441,6 +1446,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9300_20_OR_LATER(ah)) {
ath9k_hw_loadnf(ah, curchan);
ath9k_hw_start_nfcal(ah);
+ ar9003_hw_bb_watchdog_config(ah);
}
return 0;
@@ -1489,6 +1495,7 @@ EXPORT_SYMBOL(ath9k_hw_keyreset);
bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
+ u32 unicast_flag = AR_KEYTABLE_VALID;
if (entry >= ah->caps.keycache_size) {
ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
@@ -1497,6 +1504,16 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
}
if (mac != NULL) {
+ /*
+ * AR_KEYTABLE_VALID indicates that the address is a unicast
+ * address, which must match the transmitter address for
+ * decrypting frames.
+ * Not setting this bit allows the hardware to use the key
+ * for multicast frame decryption.
+ */
+ if (mac[0] & 0x01)
+ unicast_flag = 0;
+
macHi = (mac[5] << 8) | mac[4];
macLo = (mac[3] << 24) |
(mac[2] << 16) |
@@ -1509,7 +1526,7 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
macLo = macHi = 0;
}
REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
- REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
+ REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
return true;
}
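The mac[0] & 0x01 test above checks the I/G (individual/group) bit, the least significant bit of the first octet of an IEEE 802 address, which is set for multicast and broadcast addresses. A minimal standalone check of the same property (the kernel's etherdevice.h also provides is_multicast_ether_addr() for this); the function name below is illustrative:

/* Illustrative only: group (multicast/broadcast) addresses have the
 * LSB of the first octet set. */
static inline bool is_group_addr(const u8 *mac)
{
	return (mac[0] & 0x01) != 0;
}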
@@ -2165,7 +2182,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
- if (AR_SREV_9271(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
else
pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
@@ -2220,6 +2237,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->rx_status_len = sizeof(struct ar9003_rxs);
pCap->tx_desc_len = sizeof(struct ar9003_txc);
pCap->txs_len = sizeof(struct ar9003_txs);
+ if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
if (AR_SREV_9280_20(ah) &&
@@ -2232,100 +2251,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
- return 0;
-}
-
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 *result)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- switch (type) {
- case ATH9K_CAP_CIPHER:
- switch (capability) {
- case ATH9K_CIPHER_AES_CCM:
- case ATH9K_CIPHER_AES_OCB:
- case ATH9K_CIPHER_TKIP:
- case ATH9K_CIPHER_WEP:
- case ATH9K_CIPHER_MIC:
- case ATH9K_CIPHER_CLR:
- return true;
- default:
- return false;
- }
- case ATH9K_CAP_TKIP_MIC:
- switch (capability) {
- case 0:
- return true;
- case 1:
- return (ah->sta_id1_defaults &
- AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
- false;
- }
- case ATH9K_CAP_TKIP_SPLIT:
- return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
- false : true;
- case ATH9K_CAP_MCAST_KEYSRCH:
- switch (capability) {
- case 0:
- return true;
- case 1:
- if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
- return false;
- } else {
- return (ah->sta_id1_defaults &
- AR_STA_ID1_MCAST_KSRCH) ? true :
- false;
- }
- }
- return false;
- case ATH9K_CAP_TXPOW:
- switch (capability) {
- case 0:
- return 0;
- case 1:
- *result = regulatory->power_limit;
- return 0;
- case 2:
- *result = regulatory->max_power_level;
- return 0;
- case 3:
- *result = regulatory->tp_scale;
- return 0;
- }
- return false;
- case ATH9K_CAP_DS:
- return (AR_SREV_9280_20_OR_LATER(ah) &&
- (ah->eep_ops->get_eeprom(ah, EEP_RC_CHAIN_MASK) == 1))
- ? false : true;
- default:
- return false;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_getcapability);
+ if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 setting, int *status)
-{
- switch (type) {
- case ATH9K_CAP_TKIP_MIC:
- if (setting)
- ah->sta_id1_defaults |=
- AR_STA_ID1_CRPT_MIC_ENABLE;
- else
- ah->sta_id1_defaults &=
- ~AR_STA_ID1_CRPT_MIC_ENABLE;
- return true;
- case ATH9K_CAP_MCAST_KEYSRCH:
- if (setting)
- ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
- else
- ah->sta_id1_defaults &= ~AR_STA_ID1_MCAST_KSRCH;
- return true;
- default:
- return false;
- }
+ return 0;
}
-EXPORT_SYMBOL(ath9k_hw_setcapability);
/****************************/
/* GPIO / RFKILL / Antennae */
@@ -2520,12 +2450,6 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
-{
- memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
-}
-EXPORT_SYMBOL(ath9k_hw_setmac);
-
void ath9k_hw_setopmode(struct ath_hw *ah)
{
ath9k_hw_set_operating_mode(ah, ah->opmode);
@@ -2598,21 +2522,6 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
-/*
- * Extend 15-bit time stamp from rx descriptor to
- * a full 64-bit TSF using the current h/w TSF.
-*/
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
-{
- u64 tsf;
-
- tsf = ath9k_hw_gettsf64(ah);
- if ((tsf & 0x7fff) < rstamp)
- tsf -= 0x8000;
- return (tsf & ~0x7fff) | rstamp;
-}
-EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-
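For context on the helper removed above: the rx descriptor carries only the low 15 bits of the TSF, and the full 64-bit value is rebuilt from the current hardware TSF, stepping back one 0x8000-microsecond period when the low bits have already wrapped past the frame's stamp (e.g. current TSF low bits 0x0010 with rstamp 0x7ff0 places the frame in the previous period). A standalone sketch of the same logic, assuming kernel integer types:

/* Illustrative only: splice a 15-bit rx timestamp into the current
 * 64-bit TSF, compensating for a wrap of the low 15 bits. */
static u64 extend_15bit_stamp(u64 tsf, u32 rstamp)
{
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fffULL) | rstamp;
}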
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 77245dff5993..5ecbfcf7470a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -158,6 +158,9 @@
#define ATH9K_HW_RX_HP_QDEPTH 16
#define ATH9K_HW_RX_LP_QDEPTH 128
+#define PAPRD_GAIN_TABLE_ENTRIES 32
+#define PAPRD_TABLE_SZ 24
+
enum ath_ini_subsys {
ATH_INI_PRE = 0,
ATH_INI_CORE,
@@ -199,15 +202,8 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
ATH9K_HW_CAP_LDPC = BIT(19),
ATH9K_HW_CAP_FASTCLOCK = BIT(20),
-};
-
-enum ath9k_capability_type {
- ATH9K_CAP_CIPHER = 0,
- ATH9K_CAP_TKIP_MIC,
- ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_TXPOW,
- ATH9K_CAP_MCAST_KEYSRCH,
- ATH9K_CAP_DS
+ ATH9K_HW_CAP_SGI_20 = BIT(21),
+ ATH9K_HW_CAP_PAPRD = BIT(22),
};
struct ath9k_hw_capabilities {
@@ -237,7 +233,7 @@ struct ath9k_ops_config {
int sw_beacon_response_time;
int additional_swba_backoff;
int ack_6mb;
- int cwm_ignore_extcca;
+ u32 cwm_ignore_extcca;
u8 pcie_powersave_enable;
u8 pcie_clock_req;
u32 pcie_waen;
@@ -262,10 +258,10 @@ struct ath9k_ops_config {
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
- bool tx_iq_calibration; /* Only available for >= AR9003 */
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
+ u16 ani_poll_interval; /* ANI poll interval in ms */
};
enum ath9k_int {
@@ -279,6 +275,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
ATH9K_INT_RXPHY = 0x00004000,
@@ -358,6 +355,9 @@ struct ath9k_channel {
int8_t iCoff;
int8_t qCoff;
int16_t rawNoiseFloor;
+ bool paprd_done;
+ u16 small_signal_gain[AR9300_MAX_CHAINS];
+ u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
};
#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -459,7 +459,7 @@ struct ath9k_hw_version {
#define AR_GENTMR_BIT(_index) (1 << (_index))
/*
- * Using de Bruijin sequence to to look up 1's index in a 32 bit number
+ * Using de Bruijn sequence to look up 1's index in a 32-bit number
* debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
*/
#define debruijn32 0x077CB531U
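The debruijn32 constant above enables the classic branch-free lookup of a set bit's index: isolate the lowest set bit, multiply by the de Bruijn constant, and use the top five bits of the product as an index into a 32-entry table. A minimal standalone sketch using the standard position table for 0x077CB531 (the table and function name below are not the driver's):

/* Illustrative only: index of the lowest set bit of a non-zero v. */
static const u8 debruijn_pos[32] = {
	 0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
	31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9
};

static int lowest_set_bit(u32 v)
{
	return debruijn_pos[((v & -v) * debruijn32) >> 27];
}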
@@ -510,6 +510,17 @@ struct ath_gen_timer_table {
* @setup_calibration: set up calibration
* @iscal_supported: used to query if a type of calibration is supported
* @loadnf: load noise floor read from each chain on the CCA registers
+ *
+ * @ani_reset: reset ANI parameters to default values
+ * @ani_lower_immunity: lower the noise immunity level. The level controls
+ * the power-based packet detection on hardware. If a power jump is
+ * detected, the adapter takes it as an indication that a packet has
+ * arrived. The level ranges from 0 to 5. Each level corresponds to a
+ * few dB more of noise immunity. If strong time-varying interference
+ * is causing false detections (OFDM timing errors or CCK timing
+ * errors), the level can be increased.
+ * @ani_cache_ini_regs: cache the values for ANI from the initial
+ * register settings through the register initialization.
*/
struct ath_hw_private_ops {
/* Calibration ops */
@@ -553,6 +564,11 @@ struct ath_hw_private_ops {
int param);
void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ /* ANI */
+ void (*ani_reset)(struct ath_hw *ah, bool is_scanning);
+ void (*ani_lower_immunity)(struct ath_hw *ah);
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
};
/**
@@ -563,6 +579,11 @@ struct ath_hw_private_ops {
*
* @config_pci_powersave:
* @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @ani_proc_mib_event: process MIB events; this happens when specific ANI
+ * thresholds are reached or the MIB counters overflow.
+ * @ani_monitor: called periodically by the core driver to collect
+ * MIB stats and adjust ANI if specific thresholds have been reached.
*/
struct ath_hw_ops {
void (*config_pci_powersave)(struct ath_hw *ah,
@@ -603,6 +624,9 @@ struct ath_hw_ops {
u32 burstDuration);
void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
u32 vmf);
+
+ void (*ani_proc_mib_event)(struct ath_hw *ah);
+ void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan);
};
struct ath_hw {
@@ -789,6 +813,12 @@ struct ath_hw {
u32 ts_paddr_end;
u16 ts_tail;
u8 ts_size;
+
+ u32 bb_watchdog_last_status;
+ u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+
+ u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
+ u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -818,10 +848,6 @@ int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
int ath9k_hw_fill_cap_info(struct ath_hw *ah);
-bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 *result);
-bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
- u32 capability, u32 setting, int *status);
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
@@ -856,7 +882,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
bool ath9k_hw_phy_disable(struct ath_hw *ah);
bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
-void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -865,7 +890,6 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
-u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
@@ -907,13 +931,26 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
int ar9002_hw_rf_claim(struct ath_hw *ah);
void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_update_async_fifo(struct ath_hw *ah);
void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
/*
- * Code specifric to AR9003, we stuff these here to avoid callbacks
+ * Code specific to AR9003; we stuff these here to avoid callbacks
* for older families
*/
void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_config(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
+void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
+void ar9003_paprd_enable(struct ath_hw *ah, bool val);
+void ar9003_paprd_populate_single_table(struct ath_hw *ah,
+ struct ath9k_channel *chan, int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
+ int chain);
+int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+int ar9003_paprd_init_table(struct ath_hw *ah);
+bool ar9003_paprd_is_done(struct ath_hw *ah);
+void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
/* Hardware family op attach helpers */
void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -926,8 +963,24 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
void ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
+/*
+ * ANI work can be shared between all families, but the next-generation
+ * implementation of ANI is used for AR9003 only for now, as the other
+ * families still need to be tested with it. Feel free to start testing
+ * it on the older families (AR5008, AR9001, AR9002) by using
+ * modparam_force_new_ani.
+ */
+extern int modparam_force_new_ani;
+void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah);
+void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
+#define ATH9K_CLOCK_RATE_CCK 22
+#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
+#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d457cb3bd772..514a4014c198 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,18 +175,6 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
-static int count_streams(unsigned int chainmask, int max)
-{
- int streams = 0;
-
- do {
- if (++streams == max)
- break;
- } while ((chainmask = chainmask & (chainmask - 1)));
-
- return streams;
-}
-
/**************************/
/* Initialization */
/**************************/
@@ -208,6 +196,9 @@ static void setup_ht_cap(struct ath_softc *sc,
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
@@ -224,8 +215,8 @@ static void setup_ht_cap(struct ath_softc *sc,
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = count_streams(common->tx_chainmask, max_streams);
- rx_streams = count_streams(common->rx_chainmask, max_streams);
+ tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
+ rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
ath_print(common, ATH_DBG_CONFIG,
"TX streams %d, RX streams: %d\n",
@@ -388,36 +379,14 @@ static void ath9k_init_crypto(struct ath_softc *sc)
for (i = 0; i < common->keymax; i++)
ath9k_hw_keyreset(sc->sc_ah, (u16) i);
- if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
- }
-
/*
* Check whether the separate key cache entries
* are required to handle both tx+rx MIC keys.
* With split mic keys the number of stations is limited
* to 27 otherwise 59.
*/
- if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
+ if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA))
common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
- 1, 1, NULL);
-
}
static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -435,7 +404,7 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
@@ -472,23 +441,23 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ if (!ath_tx_setup(sc, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ if (!ath_tx_setup(sc, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ if (!ath_tx_setup(sc, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ if (!ath_tx_setup(sc, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
@@ -745,6 +714,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
goto error_world;
}
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
sc->wiphy_scheduler_int = msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 0e425cb4bbb1..e955bb9d98cb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
@@ -554,8 +555,13 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REGWRITE_BUFFER_FLUSH(ah);
DISABLE_REGWRITE_BUFFER(ah);
- /* cwmin and cwmax should be 0 for beacon queue */
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ /*
+ * cwmin and cwmax should be 0 for the beacon queue,
+ * but not for IBSS, as that would create an imbalance
+ * in beaconing fairness among participating nodes.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah) &&
+ ah->opmode != NL80211_IFTYPE_ADHOC) {
REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
| SM(0, AR_D_LCL_IFS_CWMAX)
| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
@@ -756,11 +762,11 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_startpcureceive(struct ath_hw *ah)
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
ath9k_enable_mib_counters(ah);
- ath9k_ani_reset(ah);
+ ath9k_ani_reset(ah, is_scanning);
REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 00f3e0c7528a..7559fb2b28a5 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -577,13 +577,8 @@ enum ath9k_tx_queue {
#define ATH9K_NUM_TX_QUEUES 10
-enum ath9k_tx_queue_subtype {
- ATH9K_WME_AC_BK = 0,
- ATH9K_WME_AC_BE,
- ATH9K_WME_AC_VI,
- ATH9K_WME_AC_VO,
- ATH9K_WME_UPSD
-};
+/* Used as a queue subtype instead of a WMM AC */
+#define ATH9K_WME_UPSD 4
enum ath9k_tx_queue_flags {
TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
@@ -617,7 +612,7 @@ enum ath9k_pkt_type {
struct ath9k_tx_queue_info {
u32 tqi_ver;
enum ath9k_tx_queue tqi_type;
- enum ath9k_tx_queue_subtype tqi_subtype;
+ int tqi_subtype;
enum ath9k_tx_queue_flags tqi_qflags;
u32 tqi_priority;
u32 tqi_aifs;
@@ -715,7 +710,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_startpcureceive(struct ath_hw *ah);
+void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_stoppcurecv(struct ath_hw *ah);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index abfa0493236f..c8de50fa6378 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -51,13 +51,11 @@ static void ath_cache_conf_rate(struct ath_softc *sc,
static void ath_update_txpow(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- u32 txpow;
if (sc->curtxpow != sc->config.txpowlimit) {
ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
/* read back in case value is clamped */
- ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
- sc->curtxpow = txpow;
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
}
}
@@ -232,6 +230,113 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
return r;
}
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ int chain;
+
+ if (!ah->curchan->paprd_done)
+ return;
+
+ ath9k_ps_wakeup(sc);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
+ }
+
+ ar9003_paprd_enable(ah, true);
+ ath9k_ps_restore(sc);
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_tx_info *tx_info;
+ int band = hw->conf.channel->band;
+ struct ieee80211_supported_band *sband = &sc->sbands[band];
+ struct ath_tx_control txctl;
+ int qnum, ftype;
+ int chain_ok = 0;
+ int chain;
+ int len = 1800;
+ int time_left;
+ int i;
+
+ ath9k_ps_wakeup(sc);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ skb_put(skb, len);
+ memset(skb->data, 0, len);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+ hdr->frame_control = cpu_to_le16(ftype);
+ hdr->duration_id = 10;
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ memset(&txctl, 0, sizeof(txctl));
+ qnum = sc->tx.hwq_map[WME_AC_BE];
+ txctl.txq = &sc->tx.txq[qnum];
+
+ ar9003_paprd_init_table(ah);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->caps.tx_chainmask & BIT(chain)))
+ continue;
+
+ chain_ok = 0;
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = band;
+
+ for (i = 0; i < 4; i++) {
+ tx_info->control.rates[i].idx = sband->n_bitrates - 1;
+ tx_info->control.rates[i].count = 6;
+ }
+
+ init_completion(&sc->paprd_complete);
+ ar9003_paprd_setup_gain_table(ah, chain);
+ txctl.paprd = BIT(chain);
+ if (ath_tx_start(hw, skb, &txctl) != 0)
+ break;
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ 100);
+ if (!time_left) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "Timeout waiting for paprd training on "
+ "TX chain %d\n",
+ chain);
+ break;
+ }
+
+ if (!ar9003_paprd_is_done(ah))
+ break;
+
+ if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
+ break;
+
+ chain_ok = 1;
+ }
+ kfree_skb(skb);
+
+ if (chain_ok) {
+ ah->curchan->paprd_done = true;
+ ath_paprd_activate(sc);
+ }
+
+ ath9k_ps_restore(sc);
+}
+
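One API detail in the calibration loop above: wait_for_completion_timeout() interprets its second argument in jiffies (so the bare 100 is 100 jiffies) and returns 0 on timeout or the remaining jiffies otherwise. An illustrative usage with an explicit millisecond conversion; the surrounding variables are made up for the example:

	struct completion done;
	unsigned long time_left;

	init_completion(&done);
	/* ... start work that will eventually call complete(&done) ... */
	time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(100));
	if (!time_left)
		pr_debug("timed out waiting for completion\n");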
/*
* This routine performs the periodic noise floor calibration function
* that is used to adjust and optimize the chip performance. This
@@ -285,7 +390,8 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if ((timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -326,15 +432,24 @@ set_timer:
*/
cal_interval = ATH_LONG_CALINTERVAL;
if (sc->sc_ah->config.enable_ani)
- cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ cal_interval = min(cal_interval,
+ (u32)ah->config.ani_poll_interval);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
+ !(sc->sc_flags & SC_OP_SCANNING)) {
+ if (!sc->sc_ah->curchan->paprd_done)
+ ieee80211_queue_work(sc->hw, &sc->paprd_work);
+ else
+ ath_paprd_activate(sc);
+ }
}
static void ath_start_ani(struct ath_common *common)
{
+ struct ath_hw *ah = common->ah;
unsigned long timestamp = jiffies_to_msecs(jiffies);
common->ani.longcal_timer = timestamp;
@@ -342,7 +457,8 @@ static void ath_start_ani(struct ath_common *common)
common->ani.checkani_timer = timestamp;
mod_timer(&common->ani.timer,
- jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+ jiffies +
+ msecs_to_jiffies((u32)ah->config.ani_poll_interval));
}
/*
@@ -520,6 +636,12 @@ irqreturn_t ath_isr(int irq, void *dev)
!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ (status & ATH9K_INT_BB_WATCHDOG)) {
+ ar9003_hw_bb_watchdog_dbg_info(ah);
+ goto chip_reset;
+ }
+
if (status & ATH9K_INT_SWBA)
tasklet_schedule(&sc->bcon_tasklet);
@@ -615,234 +737,6 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
return chanmode;
}
-static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
- struct ath9k_keyval *hk, const u8 *addr,
- bool authenticator)
-{
- struct ath_hw *ah = common->ah;
- const u8 *key_rxmic;
- const u8 *key_txmic;
-
- key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
- key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
-
- if (addr == NULL) {
- /*
- * Group key installation - only two key cache entries are used
- * regardless of splitmic capability since group key is only
- * used either for TX or RX.
- */
- if (authenticator) {
- memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
- memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
- } else {
- memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
- memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
- }
- return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
- }
- if (!common->splitmic) {
- /* TX and RX keys share the same key cache entry. */
- memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
- memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
- return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
- }
-
- /* Separate key cache entries for TX and RX */
-
- /* TX key goes at first index, RX key at +32. */
- memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
- if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
- /* TX MIC entry failed. No need to proceed further */
- ath_print(common, ATH_DBG_FATAL,
- "Setting TX MIC Key Failed\n");
- return 0;
- }
-
- memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
- /* XXX delete tx key on failure? */
- return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
-}
-
-static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
-{
- int i;
-
- for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
- if (test_bit(i, common->keymap) ||
- test_bit(i + 64, common->keymap))
- continue; /* At least one part of TKIP key allocated */
- if (common->splitmic &&
- (test_bit(i + 32, common->keymap) ||
- test_bit(i + 64 + 32, common->keymap)))
- continue; /* At least one part of TKIP key allocated */
-
- /* Found a free slot for a TKIP key */
- return i;
- }
- return -1;
-}
-
-static int ath_reserve_key_cache_slot(struct ath_common *common)
-{
- int i;
-
- /* First, try to find slots that would not be available for TKIP. */
- if (common->splitmic) {
- for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
- if (!test_bit(i, common->keymap) &&
- (test_bit(i + 32, common->keymap) ||
- test_bit(i + 64, common->keymap) ||
- test_bit(i + 64 + 32, common->keymap)))
- return i;
- if (!test_bit(i + 32, common->keymap) &&
- (test_bit(i, common->keymap) ||
- test_bit(i + 64, common->keymap) ||
- test_bit(i + 64 + 32, common->keymap)))
- return i + 32;
- if (!test_bit(i + 64, common->keymap) &&
- (test_bit(i , common->keymap) ||
- test_bit(i + 32, common->keymap) ||
- test_bit(i + 64 + 32, common->keymap)))
- return i + 64;
- if (!test_bit(i + 64 + 32, common->keymap) &&
- (test_bit(i, common->keymap) ||
- test_bit(i + 32, common->keymap) ||
- test_bit(i + 64, common->keymap)))
- return i + 64 + 32;
- }
- } else {
- for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
- if (!test_bit(i, common->keymap) &&
- test_bit(i + 64, common->keymap))
- return i;
- if (test_bit(i, common->keymap) &&
- !test_bit(i + 64, common->keymap))
- return i + 64;
- }
- }
-
- /* No partially used TKIP slots, pick any available slot */
- for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
- /* Do not allow slots that could be needed for TKIP group keys
- * to be used. This limitation could be removed if we know that
- * TKIP will not be used. */
- if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
- continue;
- if (common->splitmic) {
- if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
- continue;
- if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
- continue;
- }
-
- if (!test_bit(i, common->keymap))
- return i; /* Found a free slot for a key */
- }
-
- /* No free slot found */
- return -1;
-}
-
-static int ath_key_config(struct ath_common *common,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct ath_hw *ah = common->ah;
- struct ath9k_keyval hk;
- const u8 *mac = NULL;
- int ret = 0;
- int idx;
-
- memset(&hk, 0, sizeof(hk));
-
- switch (key->alg) {
- case ALG_WEP:
- hk.kv_type = ATH9K_CIPHER_WEP;
- break;
- case ALG_TKIP:
- hk.kv_type = ATH9K_CIPHER_TKIP;
- break;
- case ALG_CCMP:
- hk.kv_type = ATH9K_CIPHER_AES_CCM;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- hk.kv_len = key->keylen;
- memcpy(hk.kv_val, key->key, key->keylen);
-
- if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- /* For now, use the default keys for broadcast keys. This may
- * need to change with virtual interfaces. */
- idx = key->keyidx;
- } else if (key->keyidx) {
- if (WARN_ON(!sta))
- return -EOPNOTSUPP;
- mac = sta->addr;
-
- if (vif->type != NL80211_IFTYPE_AP) {
- /* Only keyidx 0 should be used with unicast key, but
- * allow this for client mode for now. */
- idx = key->keyidx;
- } else
- return -EIO;
- } else {
- if (WARN_ON(!sta))
- return -EOPNOTSUPP;
- mac = sta->addr;
-
- if (key->alg == ALG_TKIP)
- idx = ath_reserve_key_cache_slot_tkip(common);
- else
- idx = ath_reserve_key_cache_slot(common);
- if (idx < 0)
- return -ENOSPC; /* no free key cache entries */
- }
-
- if (key->alg == ALG_TKIP)
- ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
- vif->type == NL80211_IFTYPE_AP);
- else
- ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
-
- if (!ret)
- return -EIO;
-
- set_bit(idx, common->keymap);
- if (key->alg == ALG_TKIP) {
- set_bit(idx + 64, common->keymap);
- if (common->splitmic) {
- set_bit(idx + 32, common->keymap);
- set_bit(idx + 64 + 32, common->keymap);
- }
- }
-
- return idx;
-}
-
-static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
-{
- struct ath_hw *ah = common->ah;
-
- ath9k_hw_keyreset(ah, key->hw_key_idx);
- if (key->hw_key_idx < IEEE80211_WEP_NKID)
- return;
-
- clear_bit(key->hw_key_idx, common->keymap);
- if (key->alg != ALG_TKIP)
- return;
-
- clear_bit(key->hw_key_idx + 64, common->keymap);
- if (common->splitmic) {
- ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
- clear_bit(key->hw_key_idx + 32, common->keymap);
- clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
- }
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -1026,25 +920,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
+static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
switch (queue) {
case 0:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
+ qnum = sc->tx.hwq_map[WME_AC_VO];
break;
case 1:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
+ qnum = sc->tx.hwq_map[WME_AC_VI];
break;
case 2:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
case 3:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
+ qnum = sc->tx.hwq_map[WME_AC_BK];
break;
default:
- qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
+ qnum = sc->tx.hwq_map[WME_AC_BE];
break;
}
@@ -1056,16 +950,16 @@ int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
int qnum;
switch (queue) {
- case ATH9K_WME_AC_VO:
+ case WME_AC_VO:
qnum = 0;
break;
- case ATH9K_WME_AC_VI:
+ case WME_AC_VI:
qnum = 1;
break;
- case ATH9K_WME_AC_BE:
+ case WME_AC_BE:
qnum = 2;
break;
- case ATH9K_WME_AC_BK:
+ case WME_AC_BK:
qnum = 3;
break;
default:
@@ -1195,7 +1089,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
ATH9K_INT_GLOBAL;
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+ ah->imask |= ATH9K_INT_RXHP |
+ ATH9K_INT_RXLP |
+ ATH9K_INT_BB_WATCHDOG;
else
ah->imask |= ATH9K_INT_RX;
@@ -1245,6 +1141,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
struct ath_tx_control txctl;
int padpos, padsize;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int qnum;
if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
ath_print(common, ATH_DBG_XMIT,
@@ -1274,7 +1171,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
* completed and if needed, also for RX of buffered frames.
*/
ath9k_ps_wakeup(sc);
- ath9k_hw_setrxabort(sc->sc_ah, 0);
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
@@ -1316,11 +1214,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
memmove(skb->data, skb->data + padsize, padpos);
}
- /* Check if a tx queue is available */
-
- txctl.txq = ath_test_get_txq(sc, skb);
- if (!txctl.txq)
- goto exit;
+ qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+ txctl.txq = &sc->tx.txq[qnum];
ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
@@ -1348,6 +1243,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&sc->ath_led_blink_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_work_sync(&sc->paprd_work);
if (!sc->num_sec_wiphy) {
cancel_delayed_work_sync(&sc->wiphy_work);
@@ -1538,8 +1434,8 @@ void ath9k_enable_ps(struct ath_softc *sc)
ah->imask |= ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
+ ath9k_hw_setrxabort(ah, 1);
}
- ath9k_hw_setrxabort(ah, 1);
}
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1776,7 +1672,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if ((qnum == sc->tx.hwq_map[ATH9K_WME_AC_BE]) && !ret)
+ if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
@@ -1804,7 +1700,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
switch (cmd) {
case SET_KEY:
- ret = ath_key_config(common, vif, sta, key);
+ ret = ath9k_cmn_key_config(common, vif, sta, key);
if (ret >= 0) {
key->hw_key_idx = ret;
/* push IV and Michael MIC generation to stack */
@@ -1817,7 +1713,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
- ath_key_delete(common, key);
+ ath9k_cmn_key_delete(common, key);
break;
default:
ret = -EINVAL;
@@ -1990,6 +1886,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ath_softc *sc = aphy->sc;
int ret = 0;
+ local_bh_disable();
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
if (!(sc->sc_flags & SC_OP_RXAGGR))
@@ -2019,6 +1917,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
"Unknown AMPDU action\n");
}
+ local_bh_enable();
+
return ret;
}
@@ -2063,6 +1963,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
ath9k_wiphy_pause_all_forced(sc, aphy);
sc->sc_flags |= SC_OP_SCANNING;
del_timer_sync(&common->ani.timer);
+ cancel_work_sync(&sc->paprd_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
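
[Editor's note] The main.c hunks above drop the driver-local key cache helpers in favour of the shared ath9k_cmn_key_config()/ath9k_cmn_key_delete() used in the ath9k_set_key() hunk. As a reference for the slot layout those helpers encode — one TKIP key claims keymap bits i and i + 64, plus i + 32 and i + 64 + 32 on split-MIC hardware — here is a minimal user-space sketch; the names (sketch_*, bit_set) and sizes are illustrative, not the driver's, and the caller's bitmap must cover at least SKETCH_KEYMAX / 2 + 96 bits.

	#include <stdbool.h>
	#include <stddef.h>

	#define SKETCH_KEYMAX   128
	#define SKETCH_WEP_NKID 4
	#define BITS_PER_WORD   (8 * sizeof(unsigned long))

	static bool bit_set(const unsigned long *map, size_t bit)
	{
		return map[bit / BITS_PER_WORD] & (1UL << (bit % BITS_PER_WORD));
	}

	static int sketch_reserve_tkip_slot(const unsigned long *keymap, bool splitmic)
	{
		int i;

		for (i = SKETCH_WEP_NKID; i < SKETCH_KEYMAX / 2; i++) {
			if (bit_set(keymap, i) || bit_set(keymap, i + 64))
				continue;	/* half of a TKIP group already taken */
			if (splitmic &&
			    (bit_set(keymap, i + 32) || bit_set(keymap, i + 64 + 32)))
				continue;	/* split-MIC halves already taken */
			return i;		/* all four entries of the group are free */
		}
		return -1;			/* no room for another TKIP key */
	}
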
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 1ec836cf1c0d..257b10ba6f57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
{ 0 }
};
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 8519452c95f1..600ee0ba2880 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -20,7 +20,7 @@
#include "ath9k.h"
static const struct ath_rate_table ar5416_11na_ratetable = {
- 42,
+ 43,
8, /* MCS start */
{
{ VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
@@ -40,73 +40,75 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
29300, 7, 108, 4, 7, 7, 7, 7 },
{ VALID_2040, VALID_2040, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
- 6400, 0, 0, 0, 8, 24, 8, 24 },
+ 6400, 0, 0, 0, 8, 25, 8, 25 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
- 12700, 1, 1, 2, 9, 25, 9, 25 },
+ 12700, 1, 1, 2, 9, 26, 9, 26 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
- 18800, 2, 2, 2, 10, 26, 10, 26 },
+ 18800, 2, 2, 2, 10, 27, 10, 27 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
- 25000, 3, 3, 4, 11, 27, 11, 27 },
+ 25000, 3, 3, 4, 11, 28, 11, 28 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
- 36700, 4, 4, 4, 12, 28, 12, 28 },
+ 36700, 4, 4, 4, 12, 29, 12, 29 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
- 48100, 5, 5, 4, 13, 29, 13, 29 },
+ 48100, 5, 5, 4, 13, 30, 13, 30 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
- 53500, 6, 6, 4, 14, 30, 14, 30 },
+ 53500, 6, 6, 4, 14, 31, 14, 31 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
- 59000, 7, 7, 4, 15, 31, 15, 32 },
+ 59000, 7, 7, 4, 15, 32, 15, 33 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
- 12700, 8, 8, 3, 16, 33, 16, 33 },
+ 12700, 8, 8, 3, 16, 34, 16, 34 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
- 24800, 9, 9, 2, 17, 34, 17, 34 },
+ 24800, 9, 9, 2, 17, 35, 17, 35 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
- 36600, 10, 10, 2, 18, 35, 18, 35 },
+ 36600, 10, 10, 2, 18, 36, 18, 36 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
- 48100, 11, 11, 4, 19, 36, 19, 36 },
+ 48100, 11, 11, 4, 19, 37, 19, 37 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
- 69500, 12, 12, 4, 20, 37, 20, 37 },
+ 69500, 12, 12, 4, 20, 38, 20, 38 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
- 89500, 13, 13, 4, 21, 38, 21, 38 },
+ 89500, 13, 13, 4, 21, 39, 21, 39 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
- 98900, 14, 14, 4, 22, 39, 22, 39 },
+ 98900, 14, 14, 4, 22, 40, 22, 40 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
- 108300, 15, 15, 4, 23, 40, 23, 41 },
+ 108300, 15, 15, 4, 23, 41, 24, 42 },
+ { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS_HGI, 144400, /* 144.4 Mb */
+ 120000, 15, 15, 4, 23, 41, 24, 42 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
- 13200, 0, 0, 0, 8, 24, 24, 24 },
+ 13200, 0, 0, 0, 8, 25, 25, 25 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
- 25900, 1, 1, 2, 9, 25, 25, 25 },
+ 25900, 1, 1, 2, 9, 26, 26, 26 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
- 38600, 2, 2, 2, 10, 26, 26, 26 },
+ 38600, 2, 2, 2, 10, 27, 27, 27 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
- 49800, 3, 3, 4, 11, 27, 27, 27 },
+ 49800, 3, 3, 4, 11, 28, 28, 28 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
- 72200, 4, 4, 4, 12, 28, 28, 28 },
+ 72200, 4, 4, 4, 12, 29, 29, 29 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
- 92900, 5, 5, 4, 13, 29, 29, 29 },
+ 92900, 5, 5, 4, 13, 30, 30, 30 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
- 102700, 6, 6, 4, 14, 30, 30, 30 },
+ 102700, 6, 6, 4, 14, 31, 31, 31 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
- 112000, 7, 7, 4, 15, 31, 32, 32 },
+ 112000, 7, 7, 4, 15, 32, 33, 33 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
- 122000, 7, 7, 4, 15, 31, 32, 32 },
+ 122000, 7, 7, 4, 15, 32, 33, 33 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
- 25800, 8, 8, 0, 16, 33, 33, 33 },
+ 25800, 8, 8, 0, 16, 34, 34, 34 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
- 49800, 9, 9, 2, 17, 34, 34, 34 },
+ 49800, 9, 9, 2, 17, 35, 35, 35 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
- 71900, 10, 10, 2, 18, 35, 35, 35 },
+ 71900, 10, 10, 2, 18, 36, 36, 36 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
- 92500, 11, 11, 4, 19, 36, 36, 36 },
+ 92500, 11, 11, 4, 19, 37, 37, 37 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
- 130300, 12, 12, 4, 20, 37, 37, 37 },
+ 130300, 12, 12, 4, 20, 38, 38, 38 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
- 162800, 13, 13, 4, 21, 38, 38, 38 },
+ 162800, 13, 13, 4, 21, 39, 39, 39 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
- 178200, 14, 14, 4, 22, 39, 39, 39 },
+ 178200, 14, 14, 4, 22, 40, 40, 40 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
- 192100, 15, 15, 4, 23, 40, 41, 41 },
+ 192100, 15, 15, 4, 23, 41, 42, 42 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
- 207000, 15, 15, 4, 23, 40, 41, 41 },
+ 207000, 15, 15, 4, 23, 41, 42, 42 },
},
50, /* probe interval */
WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -116,7 +118,7 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
* for HT are the 64K max aggregate limit */
static const struct ath_rate_table ar5416_11ng_ratetable = {
- 46,
+ 47,
12, /* MCS start */
{
{ VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
@@ -144,73 +146,75 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
{ VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
30900, 11, 108, 8, 11, 11, 11, 11 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
- 6400, 0, 0, 4, 12, 28, 12, 28 },
+ 6400, 0, 0, 4, 12, 29, 12, 29 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
- 12700, 1, 1, 6, 13, 29, 13, 29 },
+ 12700, 1, 1, 6, 13, 30, 13, 30 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
- 18800, 2, 2, 6, 14, 30, 14, 30 },
+ 18800, 2, 2, 6, 14, 31, 14, 31 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
- 25000, 3, 3, 8, 15, 31, 15, 31 },
+ 25000, 3, 3, 8, 15, 32, 15, 32 },
{ VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
- 36700, 4, 4, 8, 16, 32, 16, 32 },
+ 36700, 4, 4, 8, 16, 33, 16, 33 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
- 48100, 5, 5, 8, 17, 33, 17, 33 },
+ 48100, 5, 5, 8, 17, 34, 17, 34 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
- 53500, 6, 6, 8, 18, 34, 18, 34 },
+ 53500, 6, 6, 8, 18, 35, 18, 35 },
{ INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
- 59000, 7, 7, 8, 19, 35, 19, 36 },
+ 59000, 7, 7, 8, 19, 36, 19, 37 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
- 12700, 8, 8, 4, 20, 37, 20, 37 },
+ 12700, 8, 8, 4, 20, 38, 20, 38 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
- 24800, 9, 9, 6, 21, 38, 21, 38 },
+ 24800, 9, 9, 6, 21, 39, 21, 39 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
- 36600, 10, 10, 6, 22, 39, 22, 39 },
+ 36600, 10, 10, 6, 22, 40, 22, 40 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
- 48100, 11, 11, 8, 23, 40, 23, 40 },
+ 48100, 11, 11, 8, 23, 41, 23, 41 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
- 69500, 12, 12, 8, 24, 41, 24, 41 },
+ 69500, 12, 12, 8, 24, 42, 24, 42 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
- 89500, 13, 13, 8, 25, 42, 25, 42 },
+ 89500, 13, 13, 8, 25, 43, 25, 43 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
- 98900, 14, 14, 8, 26, 43, 26, 44 },
+ 98900, 14, 14, 8, 26, 44, 26, 44 },
{ VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
- 108300, 15, 15, 8, 27, 44, 27, 45 },
+ 108300, 15, 15, 8, 27, 45, 28, 46 },
+ { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS_HGI, 144400, /* 144.4 Mb */
+ 120000, 15, 15, 8, 27, 45, 28, 46 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
- 13200, 0, 0, 8, 12, 28, 28, 28 },
+ 13200, 0, 0, 8, 12, 29, 29, 29 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
- 25900, 1, 1, 8, 13, 29, 29, 29 },
+ 25900, 1, 1, 8, 13, 30, 30, 30 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
- 38600, 2, 2, 8, 14, 30, 30, 30 },
+ 38600, 2, 2, 8, 14, 31, 31, 31 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
- 49800, 3, 3, 8, 15, 31, 31, 31 },
+ 49800, 3, 3, 8, 15, 32, 32, 32 },
{ VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
- 72200, 4, 4, 8, 16, 32, 32, 32 },
+ 72200, 4, 4, 8, 16, 33, 33, 33 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
- 92900, 5, 5, 8, 17, 33, 33, 33 },
+ 92900, 5, 5, 8, 17, 34, 34, 34 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
- 102700, 6, 6, 8, 18, 34, 34, 34 },
+ 102700, 6, 6, 8, 18, 35, 35, 35 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
- 112000, 7, 7, 8, 19, 35, 36, 36 },
+ 112000, 7, 7, 8, 19, 36, 37, 37 },
{ INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
- 122000, 7, 7, 8, 19, 35, 36, 36 },
+ 122000, 7, 7, 8, 19, 36, 37, 37 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
- 25800, 8, 8, 8, 20, 37, 37, 37 },
+ 25800, 8, 8, 8, 20, 38, 38, 38 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
- 49800, 9, 9, 8, 21, 38, 38, 38 },
+ 49800, 9, 9, 8, 21, 39, 39, 39 },
{ INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
- 71900, 10, 10, 8, 22, 39, 39, 39 },
+ 71900, 10, 10, 8, 22, 40, 40, 40 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
- 92500, 11, 11, 8, 23, 40, 40, 40 },
+ 92500, 11, 11, 8, 23, 41, 41, 41 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
- 130300, 12, 12, 8, 24, 41, 41, 41 },
+ 130300, 12, 12, 8, 24, 42, 42, 42 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
- 162800, 13, 13, 8, 25, 42, 42, 42 },
+ 162800, 13, 13, 8, 25, 43, 43, 43 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
- 178200, 14, 14, 8, 26, 43, 43, 43 },
+ 178200, 14, 14, 8, 26, 44, 44, 44 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
- 192100, 15, 15, 8, 27, 44, 45, 45 },
+ 192100, 15, 15, 8, 27, 45, 46, 46 },
{ VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
- 207000, 15, 15, 8, 27, 44, 45, 45 },
+ 207000, 15, 15, 8, 27, 45, 46, 46 },
},
50, /* probe interval */
WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -1193,21 +1197,19 @@ static void ath_rc_init(struct ath_softc *sc,
}
static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
- bool is_cw40, bool is_sgi40)
+ bool is_cw40, bool is_sgi)
{
u8 caps = 0;
if (sta->ht_cap.ht_supported) {
caps = WLAN_RC_HT_FLAG;
- if (sc->sc_ah->caps.tx_chainmask != 1 &&
- ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_DS, 0, NULL)) {
- if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- }
+ if (sta->ht_cap.mcs.rx_mask[1])
+ caps |= WLAN_RC_DS_FLAG;
if (is_cw40)
caps |= WLAN_RC_40_FLAG;
- if (is_sgi40)
+ if (is_sgi)
caps |= WLAN_RC_SGI_FLAG;
+
}
return caps;
@@ -1300,7 +1302,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
const struct ath_rate_table *rate_table;
- bool is_cw40, is_sgi40;
+ bool is_cw40, is_sgi = false;
int i, j = 0;
for (i = 0; i < sband->n_bitrates; i++) {
@@ -1323,7 +1325,11 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
}
is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- is_sgi40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+
+ if (is_cw40)
+ is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
/* Choose rate table first */
@@ -1336,7 +1342,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
rate_table = hw_rate_table[sc->cur_rate_mode];
}
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi40);
+ ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
}
@@ -1347,10 +1353,10 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
const struct ath_rate_table *rate_table = NULL;
- bool oper_cw40 = false, oper_sgi40;
+ bool oper_cw40 = false, oper_sgi;
bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
true : false;
- bool local_sgi40 = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
+ bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
true : false;
/* FIXME: Handle AP mode later when we support CWM */
@@ -1363,15 +1369,21 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
oper_chan_type == NL80211_CHAN_HT40PLUS)
oper_cw40 = true;
- oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- true : false;
+ if (oper_cw40)
+ oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ true : false;
+ else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+ oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ true : false;
+ else
+ oper_sgi = false;
- if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) {
+ if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
rate_table = ath_choose_rate_table(sc, sband->band,
sta->ht_cap.ht_supported,
oper_cw40);
ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
- oper_cw40, oper_sgi40);
+ oper_cw40, oper_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
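
[Editor's note] The rc.c hunks above make short guard interval depend on the operating channel width: SGI-40 applies on 40 MHz channels, while SGI-20 is honoured only when the hardware advertises ATH9K_HW_CAP_SGI_20. A minimal standalone sketch of that decision follows; the struct, field names and flag values are stand-ins, not mac80211's.

	#include <stdbool.h>
	#include <stdint.h>

	#define HT_CAP_SGI_20 0x0020	/* stand-in for IEEE80211_HT_CAP_SGI_20 */
	#define HT_CAP_SGI_40 0x0040	/* stand-in for IEEE80211_HT_CAP_SGI_40 */

	struct sgi_ctx {
		uint16_t ht_cap;	/* station HT capability bits */
		bool cw40;		/* operating on a 40 MHz channel */
		bool hw_sgi_20;		/* hardware supports SGI at 20 MHz */
	};

	static bool use_short_gi(const struct sgi_ctx *c)
	{
		if (c->cw40)
			return c->ht_cap & HT_CAP_SGI_40;
		if (c->hw_sgi_20)
			return c->ht_cap & HT_CAP_SGI_20;
		return false;
	}
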
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ca6065b71b46..da0cfe90c38a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -116,9 +116,6 @@ static void ath_opmode_init(struct ath_softc *sc)
/* configure operational mode */
ath9k_hw_setopmode(ah);
- /* Handle any link-level address change. */
- ath9k_hw_setmac(ah, common->macaddr);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -295,7 +292,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(sc->sc_ah);
+ ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
}
static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -501,7 +498,7 @@ int ath_startrecv(struct ath_softc *sc)
start_recv:
spin_unlock_bh(&sc->rx.rxbuflock);
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(ah);
+ ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
return 0;
}
@@ -700,12 +697,16 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
bf = SKB_CB_ATHBUF(skb);
BUG_ON(!bf);
- dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
common->rx_bufsize, DMA_FROM_DEVICE);
ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
- if (ret == -EINPROGRESS)
+ if (ret == -EINPROGRESS) {
+ /* let the device gain the buffer again */
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
return false;
+ }
__skb_unlink(skb, &rx_edma->rx_fifo);
if (ret == -EINVAL) {
@@ -814,13 +815,263 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
* 1. accessing the frame
* 2. requeueing the same buffer to h/w
*/
- dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
common->rx_bufsize,
DMA_FROM_DEVICE);
return bf;
}
+/* Assumes you've already done the endian to CPU conversion */
+static bool ath9k_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ __le16 fc;
+ u8 rx_status_len = ah->caps.rx_status_len;
+
+ fc = hdr->frame_control;
+
+ if (!rx_stats->rs_datalen)
+ return false;
+ /*
+ * rs_status follows rs_datalen, so if rs_datalen is too large we
+ * take it as a hint that the hardware corrupted it and ignore
+ * those frames.
+ */
+ if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
+ return false;
+
+ /*
+ * rs_more indicates chained descriptors which can be used
+ * to link buffers together for a sort of scatter-gather
+ * operation.
+ * Reject the frame; we don't support scatter-gather yet and
+ * the frame is probably corrupt anyway.
+ */
+ if (rx_stats->rs_more)
+ return false;
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC)
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_stats->rs_status & ATH9K_RXERR_PHY)
+ return false;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
+ *decrypt_error = true;
+ } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+ * Remove these mic errors.
+ */
+ rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
+ else
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+ }
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (rx_stats->rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ return false;
+ } else {
+ if (rx_stats->rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static int ath9k_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+
+ band = hw->conf.channel->band;
+ sband = hw->wiphy->bands[band];
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ if (rx_stats->rs_flags & ATH9K_RX_2040)
+ rxs->flag |= RX_FLAG_40MHZ;
+ if (rx_stats->rs_flags & ATH9K_RX_GI)
+ rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
+ ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
+ "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
+
+ return -EINVAL;
+}
+
+static void ath9k_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rx_stats)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ rcu_read_lock();
+ /*
+ * XXX: use ieee80211_find_sta! This requires quite a bit of work
+ * under the current ath9k virtual wiphy implementation as we have
+ * no way of tying a vif to wiphy. Typically vifs are attached to
+ * at least one sdata of a wiphy on mac80211 but with ath9k virtual
+ * wiphy you'd have to iterate over every wiphy and each sdata.
+ */
+ sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
+ if (sta) {
+ an = (struct ath_node *) sta->drv_priv;
+ if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
+ !rx_stats->rs_moreaggr)
+ ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
+ last_rssi = an->last_rssi;
+ }
+ rcu_read_unlock();
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
+ if (rx_stats->rs_rssi < 0)
+ rx_stats->rs_rssi = 0;
+
+ /* Update Beacon RSSI, this is used by ANI. */
+ if (ieee80211_is_beacon(fc))
+ ah->stats.avgbrssi = rx_stats->rs_rssi;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark packet status here and always push
+ * the frame up to let mac80211 handle the actual error case, be it a missing
+ * decryption key or a real decryption error. This lets us keep statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rx_status,
+ bool *decrypt_error)
+{
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /*
+ * everything but the rate is checked here, the rate check is done
+ * separately to avoid doing two lookups for a rate for each frame.
+ */
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ return -EINVAL;
+
+ ath9k_process_rssi(common, hw, hdr, rx_stats);
+
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ return -EINVAL;
+
+ rx_status->band = hw->conf.channel->band;
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
+ rx_status->antenna = rx_stats->rs_antenna;
+ rx_status->flag |= RX_FLAG_TSFT;
+
+ return 0;
+}
+
+static void ath9k_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+
+ /* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
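
[Editor's note] The padding removal in ath9k_rx_skb_postprocess() above relies on 802.11 header lengths being even: for an even padpos, the general formula (4 - padpos % 4) % 4 collapses to padpos & 3. A tiny standalone check of that equivalence, with illustrative names only:

	#include <assert.h>

	/* General padding formula vs. the masked shortcut used in the driver. */
	static unsigned int pad_general(unsigned int padpos)
	{
		return (4 - padpos % 4) % 4;
	}

	static unsigned int pad_masked(unsigned int padpos)
	{
		return padpos & 3;
	}

	int main(void)
	{
		unsigned int padpos;

		/* Only even header lengths occur, and only there do the two agree. */
		for (padpos = 0; padpos <= 64; padpos += 2)
			assert(pad_general(padpos) == pad_masked(padpos));
		return 0;
	}
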
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
@@ -842,15 +1093,21 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
enum ath9k_rx_qtype qtype;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int dma_type;
+ u8 rx_status_len = ah->caps.rx_status_len;
+ u64 tsf = 0;
+ u32 tsf_lower = 0;
if (edma)
- dma_type = DMA_FROM_DEVICE;
- else
dma_type = DMA_BIDIRECTIONAL;
+ else
+ dma_type = DMA_FROM_DEVICE;
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
spin_lock_bh(&sc->rx.rxbuflock);
+ tsf = ath9k_hw_gettsf64(ah);
+ tsf_lower = tsf & 0xffffffff;
+
do {
/* If handling rx interrupt and flush is in progress => exit */
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
@@ -869,7 +1126,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (!skb)
continue;
- hdr = (struct ieee80211_hdr *) skb->data;
+ hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(skb);
hw = ath_get_virt_hw(sc, hdr);
@@ -883,8 +1140,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (flush)
goto requeue;
- retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
- rxs, &decrypt_error);
+ rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
+ if (rs.rs_tstamp > tsf_lower &&
+ unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
+ rxs->mactime -= 0x100000000ULL;
+
+ if (rs.rs_tstamp < tsf_lower &&
+ unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
+ rxs->mactime += 0x100000000ULL;
+
+ retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+ rxs, &decrypt_error);
if (retval)
goto requeue;
@@ -908,8 +1174,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.rx_status_len)
skb_pull(skb, ah->caps.rx_status_len);
- ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
- rxs, decrypt_error);
+ ath9k_rx_skb_postprocess(common, skb, &rs,
+ rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
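
[Editor's note] The ath_rx_tasklet() hunk above reconstructs a 64-bit mactime from the 32-bit per-frame timestamp by splicing it onto the upper half of a TSF read taken before the loop, and compensating when the low word has wrapped in between. A standalone sketch of the same arithmetic; the helper name is illustrative.

	#include <stdint.h>

	static uint64_t extend_rx_tstamp(uint64_t tsf, uint32_t rs_tstamp)
	{
		uint32_t tsf_lower = (uint32_t)tsf;
		uint64_t mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

		/* Low word wrapped between the frame's stamp and the TSF read:
		 * the frame belongs to the previous 2^32 window. */
		if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
			mactime -= 0x100000000ULL;

		/* Frame stamped after a wrap that the earlier TSF read had not
		 * yet seen: push it into the next window. */
		if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
			mactime += 0x100000000ULL;

		return mactime;
	}
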
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d4371a43bdaa..3e3ccef438db 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -222,6 +222,7 @@
#define AR_ISR_S2 0x008c
#define AR_ISR_S2_QCU_TXURN 0x000003FF
+#define AR_ISR_S2_BB_WATCHDOG 0x00010000
#define AR_ISR_S2_CST 0x00400000
#define AR_ISR_S2_GTT 0x00800000
#define AR_ISR_S2_TIM 0x01000000
@@ -699,6 +700,9 @@
#define AR_RC_HOSTIF 0x00000100
#define AR_WA 0x4004
+#define AR_WA_BIT6 (1 << 6)
+#define AR_WA_BIT7 (1 << 7)
+#define AR_WA_BIT23 (1 << 23)
#define AR_WA_D3_L1_DISABLE (1 << 14)
#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
@@ -756,32 +760,33 @@
#define AR_SREV_REVISION2 0x00000F00
#define AR_SREV_REVISION2_S 8
-#define AR_SREV_VERSION_5416_PCI 0xD
-#define AR_SREV_VERSION_5416_PCIE 0xC
-#define AR_SREV_REVISION_5416_10 0
-#define AR_SREV_REVISION_5416_20 1
-#define AR_SREV_REVISION_5416_22 2
-#define AR_SREV_VERSION_9100 0x14
-#define AR_SREV_VERSION_9160 0x40
-#define AR_SREV_REVISION_9160_10 0
-#define AR_SREV_REVISION_9160_11 1
-#define AR_SREV_VERSION_9280 0x80
-#define AR_SREV_REVISION_9280_10 0
-#define AR_SREV_REVISION_9280_20 1
-#define AR_SREV_REVISION_9280_21 2
-#define AR_SREV_VERSION_9285 0xC0
-#define AR_SREV_REVISION_9285_10 0
-#define AR_SREV_REVISION_9285_11 1
-#define AR_SREV_REVISION_9285_12 2
-#define AR_SREV_VERSION_9287 0x180
-#define AR_SREV_REVISION_9287_10 0
-#define AR_SREV_REVISION_9287_11 1
-#define AR_SREV_REVISION_9287_12 2
-#define AR_SREV_VERSION_9271 0x140
-#define AR_SREV_REVISION_9271_10 0
-#define AR_SREV_REVISION_9271_11 1
-#define AR_SREV_VERSION_9300 0x1c0
-#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_VERSION_5416_PCI 0xD
+#define AR_SREV_VERSION_5416_PCIE 0xC
+#define AR_SREV_REVISION_5416_10 0
+#define AR_SREV_REVISION_5416_20 1
+#define AR_SREV_REVISION_5416_22 2
+#define AR_SREV_VERSION_9100 0x14
+#define AR_SREV_VERSION_9160 0x40
+#define AR_SREV_REVISION_9160_10 0
+#define AR_SREV_REVISION_9160_11 1
+#define AR_SREV_VERSION_9280 0x80
+#define AR_SREV_REVISION_9280_10 0
+#define AR_SREV_REVISION_9280_20 1
+#define AR_SREV_REVISION_9280_21 2
+#define AR_SREV_VERSION_9285 0xC0
+#define AR_SREV_REVISION_9285_10 0
+#define AR_SREV_REVISION_9285_11 1
+#define AR_SREV_REVISION_9285_12 2
+#define AR_SREV_VERSION_9287 0x180
+#define AR_SREV_REVISION_9287_10 0
+#define AR_SREV_REVISION_9287_11 1
+#define AR_SREV_REVISION_9287_12 2
+#define AR_SREV_REVISION_9287_13 3
+#define AR_SREV_VERSION_9271 0x140
+#define AR_SREV_REVISION_9271_10 0
+#define AR_SREV_REVISION_9271_11 1
+#define AR_SREV_VERSION_9300 0x1c0
+#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -859,6 +864,11 @@
(((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_12)))
+#define AR_SREV_9287_13_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_13)))
+
#define AR_SREV_9271(_ah) \
(((_ah))->hw_version.macVersion == AR_SREV_VERSION_9271)
#define AR_SREV_9271_10(_ah) \
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 105ad40968f6..89423ca23d2c 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -219,7 +219,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
info->control.rates[1].idx = -1;
memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
+ txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e23172c9caaf..6260faa658a2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -279,9 +279,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
return 0;
- if (!wmi)
- return -EINVAL;
-
skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 859aa4ab0769..20221b8c04fd 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -941,6 +941,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
if (!ATH_TXQ_SETUP(sc, qnum)) {
struct ath_txq *txq = &sc->tx.txq[qnum];
+ txq->axq_class = subtype;
txq->axq_qnum = qnum;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
@@ -958,58 +959,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
return &sc->tx.txq[qnum];
}
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
-{
- int qnum;
-
- switch (qtype) {
- case ATH9K_TX_QUEUE_DATA:
- if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->tx.hwq_map));
- return -1;
- }
- qnum = sc->tx.hwq_map[haltype];
- break;
- case ATH9K_TX_QUEUE_BEACON:
- qnum = sc->beacon.beaconq;
- break;
- case ATH9K_TX_QUEUE_CAB:
- qnum = sc->beacon.cabq->axq_qnum;
- break;
- default:
- qnum = -1;
- }
- return qnum;
-}
-
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ath_txq *txq = NULL;
- u16 skb_queue = skb_get_queue_mapping(skb);
- int qnum;
-
- qnum = ath_get_hal_qnum(skb_queue, sc);
- txq = &sc->tx.txq[qnum];
-
- spin_lock_bh(&txq->axq_lock);
-
- if (txq->axq_depth >= (ATH_TXBUF - 20)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
- "TX queue: %d is full, depth: %d\n",
- qnum, txq->axq_depth);
- ath_mac80211_stop_queue(sc, skb_queue);
- txq->stopped = 1;
- spin_unlock_bh(&txq->axq_lock);
- return NULL;
- }
-
- spin_unlock_bh(&txq->axq_lock);
-
- return txq;
-}
-
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *qinfo)
{
@@ -1688,12 +1637,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf)) {
+ if (!txctl->paprd && conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
use_ldpc = true;
}
+ bf->bf_state.bfs_paprd = txctl->paprd;
bf->bf_flags = setup_tx_flags(skb, use_ldpc);
bf->bf_keytype = get_hw_crypto_keytype(skb);
@@ -1768,6 +1718,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
bf->bf_buf_addr,
txctl->txq->axq_qnum);
+ if (bf->bf_state.bfs_paprd)
+ ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
+
spin_lock_bh(&txctl->txq->axq_lock);
if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
@@ -1809,8 +1762,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
- int r;
+ int q, r;
bf = ath_tx_get_buffer(sc);
if (!bf) {
@@ -1820,8 +1774,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
r = ath_tx_setup_buffer(hw, bf, skb, txctl);
if (unlikely(r)) {
- struct ath_txq *txq = txctl->txq;
-
ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
/* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1781,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* we will at least have to run TX completion on one buffer
* on the queue */
spin_lock_bh(&txq->axq_lock);
- if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+ if (!txq->stopped && txq->axq_depth > 1) {
ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
txq->stopped = 1;
}
@@ -1840,6 +1792,17 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return r;
}
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ spin_lock_bh(&txq->axq_lock);
+ if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
+ ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+ txq->stopped = 1;
+ }
+ spin_unlock_bh(&txq->axq_lock);
+
ath_tx_start_dma(sc, bf, txctl);
return 0;
@@ -1909,7 +1872,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int padpos, padsize;
+ int q, padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1948,8 +1911,16 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
ath9k_tx_status(hw, skb);
- else
+ else {
+ q = skb_get_queue_mapping(skb);
+ if (q >= 4)
+ q = 0;
+
+ if (--sc->tx.pending_frames[q] < 0)
+ sc->tx.pending_frames[q] = 0;
+
ieee80211_tx_status(hw, skb);
+ }
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1971,8 +1942,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
}
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf, ts);
+
+ if (bf->bf_state.bfs_paprd) {
+ sc->paprd_txok = txok;
+ complete(&sc->paprd_complete);
+ } else {
+ ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+ ath_debug_stat_tx(sc, txq, bf, ts);
+ }
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -2057,14 +2034,14 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
int qnum;
+ qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
+ if (qnum == -1)
+ return;
+
spin_lock_bh(&txq->axq_lock);
- if (txq->stopped &&
- sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
- qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
- if (qnum != -1) {
- ath_mac80211_start_queue(sc, qnum);
- txq->stopped = 0;
- }
+ if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+ ath_mac80211_start_queue(sc, qnum);
+ txq->stopped = 0;
}
spin_unlock_bh(&txq->axq_lock);
}
@@ -2279,6 +2256,17 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+ /*
+ * Make sure null func frame is acked before configuring
+ * hw into ps mode.
+ */
+ if (bf->bf_isnullfunc && txok) {
+ if ((sc->ps_flags & PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
+ }
+
if (!bf_isampdu(bf)) {
bf->bf_retries = txs.ts_longretry;
if (txs.ts_status & ATH9K_TXERR_XRETRY)
@@ -2424,26 +2412,8 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
+ ac->qnum = sc->tx.hwq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
-
- switch (acno) {
- case WME_AC_BE:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- break;
- case WME_AC_BK:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
- break;
- case WME_AC_VI:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
- break;
- case WME_AC_VO:
- ac->qnum = ath_tx_get_qnum(sc,
- ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
- break;
- }
}
}
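
[Editor's note] The xmit.c hunks above replace the hardware-queue-depth check with a per-AC pending_frames counter that gates mac80211 queue stop/start around ATH_MAX_QDEPTH. A minimal sketch of that counter scheme; locking and the real ieee80211 queue calls are omitted, and MAX_QDEPTH plus the struct and function names are stand-ins.

	#include <stdbool.h>

	#define NUM_ACS    4
	#define MAX_QDEPTH 123	/* stand-in for ATH_MAX_QDEPTH */

	struct txq_state {
		int pending[NUM_ACS];
		bool stopped[NUM_ACS];
	};

	/* Called when a frame is handed to the driver on AC q. */
	static void txq_enqueue(struct txq_state *s, int q)
	{
		if (++s->pending[q] > MAX_QDEPTH && !s->stopped[q])
			s->stopped[q] = true;	/* would stop the mac80211 queue */
	}

	/* Called from TX completion for a frame that was on AC q. */
	static void txq_complete(struct txq_state *s, int q)
	{
		if (--s->pending[q] < 0)
			s->pending[q] = 0;
		if (s->stopped[q] && s->pending[q] < MAX_QDEPTH)
			s->stopped[q] = false;	/* would wake the mac80211 queue */
	}
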
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 3a003e6803a5..8674a99356af 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -530,7 +530,7 @@ struct b43_fw_header {
/* Size of the data. For ucode and PCM this is in bytes.
* For IV this is number-of-ivs. */
__be32 size;
-} __attribute__((__packed__));
+} __packed;
/* Initial Value file format */
#define B43_IV_OFFSET_MASK 0x7FFF
@@ -540,8 +540,8 @@ struct b43_iv {
union {
__be16 d16;
__be32 d32;
- } data __attribute__((__packed__));
-} __attribute__((__packed__));
+ } data __packed;
+} __packed;
/* Data structures for DMA transmission, per 80211 core. */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index fa40fdfea719..10d0aaf754c5 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -333,11 +333,11 @@ static inline
dma_addr_t dmaaddr;
if (tx) {
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
- buf, len, DMA_TO_DEVICE);
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+ buf, len, DMA_TO_DEVICE);
} else {
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
- buf, len, DMA_FROM_DEVICE);
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+ buf, len, DMA_FROM_DEVICE);
}
return dmaaddr;
@@ -348,11 +348,11 @@ static inline
dma_addr_t addr, size_t len, int tx)
{
if (tx) {
- ssb_dma_unmap_single(ring->dev->dev,
- addr, len, DMA_TO_DEVICE);
+ dma_unmap_single(ring->dev->dev->dma_dev,
+ addr, len, DMA_TO_DEVICE);
} else {
- ssb_dma_unmap_single(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_unmap_single(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
}
@@ -361,7 +361,7 @@ static inline
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_cpu(ring->dev->dev,
+ dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
@@ -370,8 +370,8 @@ static inline
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
static inline
@@ -401,9 +401,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
*/
if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
- ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
- B43_DMA_RINGMEMSIZE,
- &(ring->dmabase), flags);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43_DMA_RINGMEMSIZE,
+ &(ring->dmabase), flags);
if (!ring->descbase) {
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
- ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
- ring->descbase, ring->dmabase, flags);
+ dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
dma_addr_t addr,
size_t buffersize, bool dma_to_device)
{
- if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+ if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
@@ -874,10 +874,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
- dma_test = ssb_dma_map_single(dev->dev,
- ring->txhdr_cache,
- b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_test = dma_map_single(dev->dev->dma_dev,
+ ring->txhdr_cache,
+ b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
@@ -889,10 +889,10 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
if (!ring->txhdr_cache)
goto err_kfree_meta;
- dma_test = ssb_dma_map_single(dev->dev,
- ring->txhdr_cache,
- b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_test = dma_map_single(dev->dev->dma_dev,
+ ring->txhdr_cache,
+ b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
@@ -903,9 +903,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
}
}
- ssb_dma_unmap_single(dev->dev,
- dma_test, b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev->dma_dev,
+ dma_test, b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
}
err = alloc_ringmemory(ring);
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = ssb_dma_set_mask(dev->dev, mask);
- if (!err)
- break;
+ err = dma_set_mask(dev->dev->dma_dev, mask);
+ if (!err) {
+ err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
+ }
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = 1;
@@ -1221,14 +1224,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+ priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+ GFP_ATOMIC | GFP_DMA);
if (!priv_info->bouncebuffer) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
- memcpy(priv_info->bouncebuffer, skb->data, skb->len);
meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
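
[Editor's note] With the ssb_dma_* wrappers gone, the b43 hunks above have to set the streaming and coherent DMA masks separately and fall back to a narrower mask when the device rejects the wider one. A hedged sketch of that negotiation loop, assuming a generic struct device pointer and a 30-bit floor; error handling is reduced to the return code.

	#include <linux/dma-mapping.h>

	static int sketch_set_dma_mask(struct device *dma_dev)
	{
		u64 mask = DMA_BIT_MASK(64);
		int err;

		while (1) {
			err = dma_set_mask(dma_dev, mask);
			if (!err) {
				err = dma_set_coherent_mask(dma_dev, mask);
				if (!err)
					return 0;
			}
			if (mask == DMA_BIT_MASK(64))
				mask = DMA_BIT_MASK(32);
			else if (mask == DMA_BIT_MASK(32))
				mask = DMA_BIT_MASK(30);
			else
				return err;	/* nothing narrower to try */
		}
	}
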
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index dc91944d6022..a01c2100f166 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -67,7 +67,7 @@
struct b43_dmadesc32 {
__le32 control;
__le32 address;
-} __attribute__ ((__packed__));
+} __packed;
#define B43_DMA32_DCTL_BYTECNT 0x00001FFF
#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define B43_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -140,7 +140,7 @@ struct b43_dmadesc64 {
__le32 control1;
__le32 address_low;
__le32 address_high;
-} __attribute__ ((__packed__));
+} __packed;
#define B43_DMA64_DCTL0_DTABLEEND 0x10000000
#define B43_DMA64_DCTL0_IRQ 0x20000000
#define B43_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -153,8 +153,8 @@ struct b43_dmadesc_generic {
union {
struct b43_dmadesc32 dma32;
struct b43_dmadesc64 dma64;
- } __attribute__ ((__packed__));
-} __attribute__ ((__packed__));
+ } __packed;
+} __packed;
/* Misc DMA constants */
#define B43_DMA_RINGMEMSIZE PAGE_SIZE
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d23ff9fe0c9e..d4cf9b390af3 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -10,8 +10,8 @@
union { \
__le32 data; \
__u8 raw[size]; \
- } __attribute__((__packed__)); \
- } __attribute__((__packed__))
+ } __packed; \
+ } __packed
/* struct b43_plcp_hdr4 */
_b43_declare_plcp_hdr(4);
@@ -57,7 +57,7 @@ struct b43_txhdr {
__u8 rts_frame[16]; /* The RTS frame (if used) */
PAD_BYTES(2);
struct b43_plcp_hdr6 plcp; /* Main PLCP header */
- } new_format __attribute__ ((__packed__));
+ } new_format __packed;
/* The old r351 format. */
struct {
@@ -68,10 +68,10 @@ struct b43_txhdr {
__u8 rts_frame[16]; /* The RTS frame (if used) */
PAD_BYTES(2);
struct b43_plcp_hdr6 plcp; /* Main PLCP header */
- } old_format __attribute__ ((__packed__));
+ } old_format __packed;
- } __attribute__ ((__packed__));
-} __attribute__ ((__packed__));
+ } __packed;
+} __packed;
/* MAC TX control */
#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
@@ -218,20 +218,20 @@ struct b43_rxhdr_fw4 {
struct {
__u8 jssi; /* PHY RX Status 1: JSSI */
__u8 sig_qual; /* PHY RX Status 1: Signal Quality */
- } __attribute__ ((__packed__));
+ } __packed;
/* RSSI for N-PHYs */
struct {
__s8 power0; /* PHY RX Status 1: Power 0 */
__s8 power1; /* PHY RX Status 1: Power 1 */
- } __attribute__ ((__packed__));
- } __attribute__ ((__packed__));
+ } __packed;
+ } __packed;
__le16 phy_status2; /* PHY RX Status 2 */
__le16 phy_status3; /* PHY RX Status 3 */
__le32 mac_status; /* MAC RX status */
__le16 mac_time;
__le16 channel;
-} __attribute__ ((__packed__));
+} __packed;
/* PHY RX Status 0 */
#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 89fe2f972c72..c81b2f53b0c5 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -372,7 +372,7 @@ struct b43legacy_fw_header {
/* Size of the data. For ucode and PCM this is in bytes.
* For IV this is number-of-ivs. */
__be32 size;
-} __attribute__((__packed__));
+} __packed;
/* Initial Value file format */
#define B43legacy_IV_OFFSET_MASK 0x7FFF
@@ -382,8 +382,8 @@ struct b43legacy_iv {
union {
__be16 d16;
__be32 d32;
- } data __attribute__((__packed__));
-} __attribute__((__packed__));
+ } data __packed;
+} __packed;
#define B43legacy_PHYMODE(phytype) (1 << (phytype))
#define B43legacy_PHYMODE_B B43legacy_PHYMODE \
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e91520d0312e..e03e01d0bc35 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -394,11 +394,11 @@ dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
dma_addr_t dmaaddr;
if (tx)
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len,
DMA_TO_DEVICE);
else
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len,
DMA_FROM_DEVICE);
@@ -412,11 +412,11 @@ void unmap_descbuffer(struct b43legacy_dmaring *ring,
int tx)
{
if (tx)
- ssb_dma_unmap_single(ring->dev->dev,
+ dma_unmap_single(ring->dev->dev->dma_dev,
addr, len,
DMA_TO_DEVICE);
else
- ssb_dma_unmap_single(ring->dev->dev,
+ dma_unmap_single(ring->dev->dev->dma_dev,
addr, len,
DMA_FROM_DEVICE);
}
@@ -428,8 +428,8 @@ void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
{
B43legacy_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_cpu(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
static inline
@@ -439,8 +439,8 @@ void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
{
B43legacy_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
static inline
@@ -460,10 +460,10 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase),
- GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase) {
b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
" failed\n");
@@ -476,8 +476,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
static void free_ringmemory(struct b43legacy_dmaring *ring)
{
- ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
- ring->descbase, ring->dmabase, GFP_KERNEL);
+ dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
@@ -589,7 +589,7 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
size_t buffersize,
bool dma_to_device)
{
- if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+ if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
@@ -906,7 +906,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
- dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
+ dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
@@ -920,7 +920,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
if (!ring->txhdr_cache)
goto err_kfree_meta;
- dma_test = ssb_dma_map_single(dev->dev,
+ dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
@@ -930,9 +930,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
goto err_kfree_txhdr_cache;
}
- ssb_dma_unmap_single(dev->dev, dma_test,
- sizeof(struct b43legacy_txhdr_fw3),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev->dma_dev, dma_test,
+ sizeof(struct b43legacy_txhdr_fw3),
+ DMA_TO_DEVICE);
}
ring->nr_slots = nr_slots;
@@ -1040,9 +1040,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = ssb_dma_set_mask(dev->dev, mask);
- if (!err)
- break;
+ err = dma_set_mask(dev->dev->dma_dev, mask);
+ if (!err) {
+ err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
+ }
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = 1;
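[Editor's sketch, not part of the patch] The hunks above convert b43legacy's DMA handling from the ssb_dma_* wrappers to the generic DMA API, passing the struct device kept in ring->dev->dev->dma_dev, and they split the mask setup into dma_set_mask() plus dma_set_coherent_mask(). A minimal sketch of the resulting pattern follows; the function and buffer names are illustrative, not taken from the driver.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* "dmadev" stands in for ring->dev->dev->dma_dev in the driver. */
    static int example_map_buffer(struct device *dmadev, void *buf, size_t len,
                                  dma_addr_t *out)
    {
            dma_addr_t addr;

            /* Streaming mapping of a CPU buffer for device reads. */
            addr = dma_map_single(dmadev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dmadev, addr))
                    return -EIO;

            *out = addr;
            return 0;
    }

    static int example_set_masks(struct device *dmadev, u64 mask)
    {
            int err;

            /* Both the streaming and the coherent mask must be configured;
             * on failure the driver's loop retries with a narrower mask. */
            err = dma_set_mask(dmadev, mask);
            if (err)
                    return err;
            return dma_set_coherent_mask(dmadev, mask);
    }

Note also that dma_free_coherent() takes no GFP argument, which is why the free_ringmemory() hunk drops GFP_KERNEL while dma_alloc_coherent() keeps it.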
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f9681041c2d8..f89c34226288 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -72,7 +72,7 @@
struct b43legacy_dmadesc32 {
__le32 control;
__le32 address;
-} __attribute__((__packed__));
+} __packed;
#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF
#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -147,7 +147,7 @@ struct b43legacy_dmadesc64 {
__le32 control1;
__le32 address_low;
__le32 address_high;
-} __attribute__((__packed__));
+} __packed;
#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000
#define B43legacy_DMA64_DCTL0_IRQ 0x20000000
#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -162,8 +162,8 @@ struct b43legacy_dmadesc_generic {
union {
struct b43legacy_dmadesc32 dma32;
struct b43legacy_dmadesc64 dma64;
- } __attribute__((__packed__));
-} __attribute__((__packed__));
+ } __packed;
+} __packed;
/* Misc DMA constants */
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 91633087a20b..289db00a4a7b 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -9,8 +9,8 @@
union { \
__le32 data; \
__u8 raw[size]; \
- } __attribute__((__packed__)); \
- } __attribute__((__packed__))
+ } __packed; \
+ } __packed
/* struct b43legacy_plcp_hdr4 */
_b43legacy_declare_plcp_hdr(4);
@@ -39,7 +39,7 @@ struct b43legacy_txhdr_fw3 {
struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */
__u8 rts_frame[18]; /* The RTS frame (if used) */
struct b43legacy_plcp_hdr6 plcp;
-} __attribute__((__packed__));
+} __packed;
/* MAC TX control */
#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */
@@ -123,7 +123,7 @@ struct b43legacy_hwtxstatus {
__le16 seq;
u8 phy_stat;
PAD_BYTES(1);
-} __attribute__((__packed__));
+} __packed;
/* Receive header for v3 firmware. */
@@ -138,7 +138,7 @@ struct b43legacy_rxhdr_fw3 {
__le16 mac_status; /* MAC RX status */
__le16 mac_time;
__le16 channel;
-} __attribute__((__packed__));
+} __packed;
/* PHY RX Status 0 */
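[Editor's sketch, not part of the patch] The __attribute__((__packed__)) to __packed conversions here and in the headers that follow are purely a spelling cleanup: __packed is the kernel's shorthand for the same GCC attribute (provided by the compiler headers, roughly as below), so structure layout and on-the-wire format are unchanged. A hypothetical example for illustration:

    /* Approximate form of the shorthand from include/linux/compiler-gcc.h: */
    #define __packed __attribute__((packed))

    /* Both declarations below have identical, unpadded layout. */
    struct example_old {
            __le16 len;
            u8 flags;
    } __attribute__((__packed__));

    struct example_new {
            __le16 len;
            u8 flags;
    } __packed;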
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index 7f9d8d976aa8..ed98ce7c8f65 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -19,35 +19,35 @@ struct hostap_ieee80211_mgmt {
__le16 status_code;
/* possibly followed by Challenge text */
u8 variable[0];
- } __attribute__ ((packed)) auth;
+ } __packed auth;
struct {
__le16 reason_code;
- } __attribute__ ((packed)) deauth;
+ } __packed deauth;
struct {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
u8 variable[0];
- } __attribute__ ((packed)) assoc_req;
+ } __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
u8 variable[0];
- } __attribute__ ((packed)) assoc_resp, reassoc_resp;
+ } __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
__le16 listen_interval;
u8 current_ap[6];
/* followed by SSID and Supported rates */
u8 variable[0];
- } __attribute__ ((packed)) reassoc_req;
+ } __packed reassoc_req;
struct {
__le16 reason_code;
- } __attribute__ ((packed)) disassoc;
+ } __packed disassoc;
struct {
- } __attribute__ ((packed)) probe_req;
+ } __packed probe_req;
struct {
u8 timestamp[8];
__le16 beacon_int;
@@ -55,9 +55,9 @@ struct hostap_ieee80211_mgmt {
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
u8 variable[0];
- } __attribute__ ((packed)) beacon, probe_resp;
+ } __packed beacon, probe_resp;
} u;
-} __attribute__ ((packed));
+} __packed;
#define IEEE80211_MGMT_HDR_LEN 24
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 90b64b092007..4230102ac9e4 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -179,7 +179,7 @@ struct hfa384x_comp_ident
__le16 variant;
__le16 major;
__le16 minor;
-} __attribute__ ((packed));
+} __packed;
#define HFA384X_COMP_ID_PRI 0x15
#define HFA384X_COMP_ID_STA 0x1f
@@ -192,14 +192,14 @@ struct hfa384x_sup_range
__le16 variant;
__le16 bottom;
__le16 top;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_build_id
{
__le16 pri_seq;
__le16 sec_seq;
-} __attribute__ ((packed));
+} __packed;
/* FD01 - Download Buffer */
struct hfa384x_rid_download_buffer
@@ -207,14 +207,14 @@ struct hfa384x_rid_download_buffer
__le16 page;
__le16 offset;
__le16 length;
-} __attribute__ ((packed));
+} __packed;
/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
struct hfa384x_comms_quality {
__le16 comm_qual; /* 0 .. 92 */
__le16 signal_level; /* 27 .. 154 */
__le16 noise_level; /* 27 .. 154 */
-} __attribute__ ((packed));
+} __packed;
/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 1ba33be98b25..1c66b3c1030d 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -31,14 +31,14 @@ struct linux_wlan_ng_val {
u32 did;
u16 status, len;
u32 data;
-} __attribute__ ((packed));
+} __packed;
struct linux_wlan_ng_prism_hdr {
u32 msgcode, msglen;
char devname[16];
struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
noise, rate, istx, frmlen;
-} __attribute__ ((packed));
+} __packed;
struct linux_wlan_ng_cap_hdr {
__be32 version;
@@ -55,7 +55,7 @@ struct linux_wlan_ng_cap_hdr {
__be32 ssi_noise;
__be32 preamble;
__be32 encoding;
-} __attribute__ ((packed));
+} __packed;
struct hostap_radiotap_rx {
struct ieee80211_radiotap_header hdr;
@@ -66,7 +66,7 @@ struct hostap_radiotap_rx {
__le16 chan_flags;
s8 dbm_antsignal;
s8 dbm_antnoise;
-} __attribute__ ((packed));
+} __packed;
#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */
#define LWNG_CAPHDR_VERSION 0x80211001
@@ -97,7 +97,7 @@ struct hfa384x_rx_frame {
__be16 len;
/* followed by frame data; max 2304 bytes */
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_tx_frame {
@@ -126,14 +126,14 @@ struct hfa384x_tx_frame {
__be16 len;
/* followed by frame data; max 2304 bytes */
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_rid_hdr
{
__le16 len;
__le16 rid;
-} __attribute__ ((packed));
+} __packed;
/* Macro for converting signal levels (range 27 .. 154) to wireless ext
@@ -145,24 +145,24 @@ struct hfa384x_rid_hdr
struct hfa384x_scan_request {
__le16 channel_list;
__le16 txrate; /* HFA384X_RATES_* */
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_hostscan_request {
__le16 channel_list;
__le16 txrate;
__le16 target_ssid_len;
u8 target_ssid[32];
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_join_request {
u8 bssid[6];
__le16 channel;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_info_frame {
__le16 len;
__le16 type;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_comm_tallies {
__le16 tx_unicast_frames;
@@ -186,7 +186,7 @@ struct hfa384x_comm_tallies {
__le16 rx_discards_wep_undecryptable;
__le16 rx_message_in_msg_fragments;
__le16 rx_message_in_bad_msg_fragments;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_comm_tallies32 {
__le32 tx_unicast_frames;
@@ -210,7 +210,7 @@ struct hfa384x_comm_tallies32 {
__le32 rx_discards_wep_undecryptable;
__le32 rx_message_in_msg_fragments;
__le32 rx_message_in_bad_msg_fragments;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_scan_result_hdr {
__le16 reserved;
@@ -219,7 +219,7 @@ struct hfa384x_scan_result_hdr {
#define HFA384X_SCAN_HOST_INITIATED 1
#define HFA384X_SCAN_FIRMWARE_INITIATED 2
#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
-} __attribute__ ((packed));
+} __packed;
#define HFA384X_SCAN_MAX_RESULTS 32
@@ -234,7 +234,7 @@ struct hfa384x_scan_result {
u8 ssid[32];
u8 sup_rates[10];
__le16 rate;
-} __attribute__ ((packed));
+} __packed;
struct hfa384x_hostscan_result {
__le16 chid;
@@ -248,7 +248,7 @@ struct hfa384x_hostscan_result {
u8 sup_rates[10];
__le16 rate;
__le16 atim;
-} __attribute__ ((packed));
+} __packed;
struct comm_tallies_sums {
unsigned int tx_unicast_frames;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 0bd4dfa59a8a..56350d571960 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -3467,10 +3467,8 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
dma_addr_t p;
priv->msg_buffers =
- (struct ipw2100_tx_packet *)kmalloc(IPW_COMMAND_POOL_SIZE *
- sizeof(struct
- ipw2100_tx_packet),
- GFP_KERNEL);
+ kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
+ GFP_KERNEL);
if (!priv->msg_buffers) {
printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for msg "
"buffers.\n", priv->net_dev->name);
@@ -4499,10 +4497,8 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
}
priv->tx_buffers =
- (struct ipw2100_tx_packet *)kmalloc(TX_PENDED_QUEUE_LENGTH *
- sizeof(struct
- ipw2100_tx_packet),
- GFP_ATOMIC);
+ kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
+ GFP_ATOMIC);
if (!priv->tx_buffers) {
printk(KERN_ERR DRV_NAME
": %s: alloc failed form tx buffers.\n",
@@ -4651,9 +4647,9 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
/*
* allocate packets
*/
- priv->rx_buffers = (struct ipw2100_rx_packet *)
- kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet),
- GFP_KERNEL);
+ priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH *
+ sizeof(struct ipw2100_rx_packet),
+ GFP_KERNEL);
if (!priv->rx_buffers) {
IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
@@ -5233,7 +5229,7 @@ struct security_info_params {
u8 auth_mode;
u8 replay_counters_number;
u8 unicast_using_group;
-} __attribute__ ((packed));
+} __packed;
static int ipw2100_set_security_information(struct ipw2100_priv *priv,
int auth_mode,
@@ -8475,7 +8471,7 @@ struct ipw2100_fw_header {
short mode;
unsigned int fw_size;
unsigned int uc_size;
-} __attribute__ ((packed));
+} __packed;
static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
{
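[Editor's sketch, not part of the patch] The ipw2100 allocation hunks above only drop redundant casts: kmalloc() returns void *, which converts implicitly to any object pointer in C, so casting its result adds noise and can hide a type mismatch if the destination pointer ever changes. A minimal sketch with illustrative names:

    #include <linux/slab.h>

    struct example_pkt {
            int id;
    };

    static struct example_pkt *example_alloc_table(size_t n)
    {
            /* No cast needed on the void * returned by kmalloc(). */
            return kmalloc(n * sizeof(struct example_pkt), GFP_KERNEL);
    }

For array allocations like these, kcalloc(n, size, flags) is the overflow-checked (and zeroing) alternative, but this patch only removes the casts.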
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 1eab0d698f4d..838002b4881e 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -164,7 +164,7 @@ struct bd_status {
} fields;
u8 field;
} info;
-} __attribute__ ((packed));
+} __packed;
struct ipw2100_bd {
u32 host_addr;
@@ -174,7 +174,7 @@ struct ipw2100_bd {
* 1st TBD) */
u8 num_fragments;
u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd))
@@ -232,7 +232,7 @@ struct ipw2100_status {
#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1)
#define IPW_STATUS_FLAG_CRC_ERROR (1<<2)
u8 rssi;
-} __attribute__ ((packed));
+} __packed;
struct ipw2100_status_queue {
/* driver (virtual) pointer to queue */
@@ -293,7 +293,7 @@ struct ipw2100_cmd_header {
u32 reserved1[3];
u32 *ordinal1_ptr;
u32 *ordinal2_ptr;
-} __attribute__ ((packed));
+} __packed;
struct ipw2100_data_header {
u32 host_command_reg;
@@ -307,7 +307,7 @@ struct ipw2100_data_header {
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
u16 fragment_size;
-} __attribute__ ((packed));
+} __packed;
/* Host command data structure */
struct host_command {
@@ -316,7 +316,7 @@ struct host_command {
u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID)
u32 host_command_length; // LENGTH
u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS
-} __attribute__ ((packed));
+} __packed;
typedef enum {
POWER_ON_RESET,
@@ -382,7 +382,7 @@ struct ipw2100_notification {
u32 hnhdr_size; /* size in bytes of data
or number of entries, if table.
Does NOT include header */
-} __attribute__ ((packed));
+} __packed;
#define MAX_KEY_SIZE 16
#define MAX_KEYS 8
@@ -814,7 +814,7 @@ struct ipw2100_rx {
struct ipw2100_notification notification;
struct ipw2100_cmd_header command;
} rx_data;
-} __attribute__ ((packed));
+} __packed;
/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */
#define TX_RATE_1_MBIT 0x0001
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 3aa3bb18f615..cb2552a6777c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -96,7 +96,7 @@ static int network_mode = 0;
static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
-static int led_support = 0;
+static int led_support = 1;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
@@ -6624,13 +6624,12 @@ static int ipw_wx_set_genie(struct net_device *dev,
return -EINVAL;
if (wrqu->data.length) {
- buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+ buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL) {
err = -ENOMEM;
goto out;
}
- memcpy(buf, extra, wrqu->data.length);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = wrqu->data.length;
@@ -12083,7 +12082,7 @@ module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
module_param_named(led, led_support, int, 0444);
-MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
+MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
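[Editor's sketch, not part of the patch] The ipw_wx_set_genie() change above is the usual kmalloc()+memcpy() to kmemdup() conversion: kmemdup() allocates and copies in one step and returns NULL on failure, so the surrounding error handling stays the same. A minimal sketch, with hypothetical names:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Duplicate a caller-supplied information element; NULL on allocation failure. */
    static u8 *example_dup_ie(const u8 *src, size_t len)
    {
            return kmemdup(src, len, GFP_KERNEL);
    }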
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index bf0eeb2e873a..d7d049c7a4fa 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -388,7 +388,7 @@ struct clx2_queue {
dma_addr_t dma_addr; /**< physical addr for BD's */
int low_mark; /**< low watermark, resume queue if free space more than this */
int high_mark; /**< high watermark, stop queue if free space less than this */
-} __attribute__ ((packed)); /* XXX */
+} __packed; /* XXX */
struct machdr32 {
__le16 frame_ctl;
@@ -399,7 +399,7 @@ struct machdr32 {
__le16 seq_ctrl; // more endians!
u8 addr4[MACADRR_BYTE_LEN];
__le16 qos_ctrl;
-} __attribute__ ((packed));
+} __packed;
struct machdr30 {
__le16 frame_ctl;
@@ -409,7 +409,7 @@ struct machdr30 {
u8 addr3[MACADRR_BYTE_LEN];
__le16 seq_ctrl; // more endians!
u8 addr4[MACADRR_BYTE_LEN];
-} __attribute__ ((packed));
+} __packed;
struct machdr26 {
__le16 frame_ctl;
@@ -419,7 +419,7 @@ struct machdr26 {
u8 addr3[MACADRR_BYTE_LEN];
__le16 seq_ctrl; // more endians!
__le16 qos_ctrl;
-} __attribute__ ((packed));
+} __packed;
struct machdr24 {
__le16 frame_ctl;
@@ -428,20 +428,20 @@ struct machdr24 {
u8 addr2[MACADRR_BYTE_LEN];
u8 addr3[MACADRR_BYTE_LEN];
__le16 seq_ctrl; // more endians!
-} __attribute__ ((packed));
+} __packed;
// TX TFD with 32 byte MAC Header
struct tx_tfd_32 {
struct machdr32 mchdr; // 32
__le32 uivplaceholder[2]; // 8
-} __attribute__ ((packed));
+} __packed;
// TX TFD with 30 byte MAC Header
struct tx_tfd_30 {
struct machdr30 mchdr; // 30
u8 reserved[2]; // 2
__le32 uivplaceholder[2]; // 8
-} __attribute__ ((packed));
+} __packed;
// tx tfd with 26 byte mac header
struct tx_tfd_26 {
@@ -449,14 +449,14 @@ struct tx_tfd_26 {
u8 reserved1[2]; // 2
__le32 uivplaceholder[2]; // 8
u8 reserved2[4]; // 4
-} __attribute__ ((packed));
+} __packed;
// tx tfd with 24 byte mac header
struct tx_tfd_24 {
struct machdr24 mchdr; // 24
__le32 uivplaceholder[2]; // 8
u8 reserved[8]; // 8
-} __attribute__ ((packed));
+} __packed;
#define DCT_WEP_KEY_FIELD_LENGTH 16
@@ -465,7 +465,7 @@ struct tfd_command {
u8 length;
__le16 reserved;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct tfd_data {
/* Header */
@@ -504,14 +504,14 @@ struct tfd_data {
__le32 num_chunks;
__le32 chunk_ptr[NUM_TFD_CHUNKS];
__le16 chunk_len[NUM_TFD_CHUNKS];
-} __attribute__ ((packed));
+} __packed;
struct txrx_control_flags {
u8 message_type;
u8 rx_seq_num;
u8 control_bits;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
#define TFD_SIZE 128
#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
@@ -523,7 +523,7 @@ struct tfd_frame {
struct tfd_command cmd;
u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
} u;
-} __attribute__ ((packed));
+} __packed;
typedef void destructor_func(const void *);
@@ -559,7 +559,7 @@ struct rate_histogram {
__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
} failed;
-} __attribute__ ((packed));
+} __packed;
/* statistics command response */
struct ipw_cmd_stats {
@@ -586,13 +586,13 @@ struct ipw_cmd_stats {
__le16 rx_autodetec_no_ofdm;
__le16 rx_autodetec_no_barker;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct notif_channel_result {
u8 channel_num;
struct ipw_cmd_stats stats;
u8 uReserved;
-} __attribute__ ((packed));
+} __packed;
#define SCAN_COMPLETED_STATUS_COMPLETE 1
#define SCAN_COMPLETED_STATUS_ABORTED 2
@@ -602,24 +602,24 @@ struct notif_scan_complete {
u8 num_channels;
u8 status;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct notif_frag_length {
__le16 frag_length;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct notif_beacon_state {
__le32 state;
__le32 number;
-} __attribute__ ((packed));
+} __packed;
struct notif_tgi_tx_key {
u8 key_state;
u8 security_type;
u8 station_index;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
#define SILENCE_OVER_THRESH (1)
#define SILENCE_UNDER_THRESH (2)
@@ -631,25 +631,25 @@ struct notif_link_deterioration {
struct rate_histogram histogram;
u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */
__le16 silence_count;
-} __attribute__ ((packed));
+} __packed;
struct notif_association {
u8 state;
-} __attribute__ ((packed));
+} __packed;
struct notif_authenticate {
u8 state;
struct machdr24 addr;
__le16 status;
-} __attribute__ ((packed));
+} __packed;
struct notif_calibration {
u8 data[104];
-} __attribute__ ((packed));
+} __packed;
struct notif_noise {
__le32 value;
-} __attribute__ ((packed));
+} __packed;
struct ipw_rx_notification {
u8 reserved[8];
@@ -669,7 +669,7 @@ struct ipw_rx_notification {
struct notif_noise noise;
u8 raw[0];
} u;
-} __attribute__ ((packed));
+} __packed;
struct ipw_rx_frame {
__le32 reserved1;
@@ -692,14 +692,14 @@ struct ipw_rx_frame {
u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
__le16 length;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct ipw_rx_header {
u8 message_type;
u8 rx_seq_num;
u8 control_bits;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_rx_packet {
struct ipw_rx_header header;
@@ -707,7 +707,7 @@ struct ipw_rx_packet {
struct ipw_rx_frame frame;
struct ipw_rx_notification notification;
} u;
-} __attribute__ ((packed));
+} __packed;
#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12
#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \
@@ -717,7 +717,7 @@ struct ipw_rx_mem_buffer {
dma_addr_t dma_addr;
struct sk_buff *skb;
struct list_head list;
-}; /* Not transferred over network, so not __attribute__ ((packed)) */
+}; /* Not transferred over network, so not __packed */
struct ipw_rx_queue {
struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -730,7 +730,7 @@ struct ipw_rx_queue {
struct list_head rx_free; /* Own an SKBs */
struct list_head rx_used; /* No SKB allocated */
spinlock_t lock;
-}; /* Not transferred over network, so not __attribute__ ((packed)) */
+}; /* Not transferred over network, so not __packed */
struct alive_command_responce {
u8 alive_command;
@@ -745,21 +745,21 @@ struct alive_command_responce {
__le16 reserved4;
u8 time_stamp[5]; /* month, day, year, hours, minutes */
u8 ucode_valid;
-} __attribute__ ((packed));
+} __packed;
#define IPW_MAX_RATES 12
struct ipw_rates {
u8 num_rates;
u8 rates[IPW_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
struct command_block {
unsigned int control;
u32 source_addr;
u32 dest_addr;
unsigned int status;
-} __attribute__ ((packed));
+} __packed;
#define CB_NUMBER_OF_ELEMENTS_SMALL 64
struct fw_image_desc {
@@ -792,7 +792,7 @@ struct ipw_sys_config {
u8 accept_all_mgmt_frames;
u8 pass_noise_stats_to_host;
u8 reserved3;
-} __attribute__ ((packed));
+} __packed;
struct ipw_multicast_addr {
u8 num_of_multicast_addresses;
@@ -801,7 +801,7 @@ struct ipw_multicast_addr {
u8 mac2[6];
u8 mac3[6];
u8 mac4[6];
-} __attribute__ ((packed));
+} __packed;
#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */
#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */
@@ -822,7 +822,7 @@ struct ipw_wep_key {
u8 key_index;
u8 key_size;
u8 key[16];
-} __attribute__ ((packed));
+} __packed;
struct ipw_tgi_tx_key {
u8 key_id;
@@ -831,7 +831,7 @@ struct ipw_tgi_tx_key {
u8 flags;
u8 key[16];
__le32 tx_counter[2];
-} __attribute__ ((packed));
+} __packed;
#define IPW_SCAN_CHANNELS 54
@@ -840,7 +840,7 @@ struct ipw_scan_request {
__le16 dwell_time;
u8 channels_list[IPW_SCAN_CHANNELS];
u8 channels_reserved[3];
-} __attribute__ ((packed));
+} __packed;
enum {
IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
@@ -857,7 +857,7 @@ struct ipw_scan_request_ext {
u8 scan_type[IPW_SCAN_CHANNELS / 2];
u8 reserved;
__le16 dwell_time[IPW_SCAN_TYPES];
-} __attribute__ ((packed));
+} __packed;
static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
{
@@ -902,7 +902,7 @@ struct ipw_associate {
u8 smr;
u8 reserved1;
__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
struct ipw_supported_rates {
u8 ieee_mode;
@@ -910,36 +910,36 @@ struct ipw_supported_rates {
u8 purpose;
u8 reserved;
u8 supported_rates[IPW_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
struct ipw_rts_threshold {
__le16 rts_threshold;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_frag_threshold {
__le16 frag_threshold;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_retry_limit {
u8 short_retry_limit;
u8 long_retry_limit;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_dino_config {
__le32 dino_config_addr;
__le16 dino_config_size;
u8 dino_response;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_aironet_info {
u8 id;
u8 length;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_rx_key {
u8 station_index;
@@ -950,25 +950,25 @@ struct ipw_rx_key {
u8 station_address[6];
u8 key_index;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct ipw_country_channel_info {
u8 first_channel;
u8 no_channels;
s8 max_tx_power;
-} __attribute__ ((packed));
+} __packed;
struct ipw_country_info {
u8 id;
u8 length;
u8 country_str[3];
struct ipw_country_channel_info groups[7];
-} __attribute__ ((packed));
+} __packed;
struct ipw_channel_tx_power {
u8 channel_number;
s8 tx_power;
-} __attribute__ ((packed));
+} __packed;
#define SCAN_ASSOCIATED_INTERVAL (HZ)
#define SCAN_INTERVAL (HZ / 10)
@@ -979,18 +979,18 @@ struct ipw_tx_power {
u8 num_channels;
u8 ieee_mode;
struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
struct ipw_rsn_capabilities {
u8 id;
u8 length;
__le16 version;
-} __attribute__ ((packed));
+} __packed;
struct ipw_sensitivity_calib {
__le16 beacon_rssi_raw;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/**
* Host command structure.
@@ -1019,7 +1019,7 @@ struct ipw_cmd { /* XXX */
* nParams=(len+3)/4+status_len
*/
u32 param[0];
-} __attribute__ ((packed));
+} __packed;
#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
@@ -1114,7 +1114,7 @@ struct ipw_event { /* XXX */
u32 event;
u32 time;
u32 data;
-} __attribute__ ((packed));
+} __packed;
struct ipw_fw_error { /* XXX */
unsigned long jiffies;
@@ -1125,7 +1125,7 @@ struct ipw_fw_error { /* XXX */
struct ipw_error_elem *elem;
struct ipw_event *log;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
#ifdef CONFIG_IPW2200_PROMISCUOUS
@@ -1170,7 +1170,7 @@ struct ipw_rt_hdr {
s8 rt_dbmnoise;
u8 rt_antenna; /* antenna number */
u8 payload[0]; /* payload... */
-} __attribute__ ((packed));
+} __packed;
#endif
struct ipw_priv {
@@ -1957,7 +1957,7 @@ enum {
struct ipw_fixed_rate {
__le16 tx_rates;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
#define IPW_INDIRECT_ADDR_MASK (~0x3ul)
@@ -1966,14 +1966,14 @@ struct host_cmd {
u8 len;
u16 reserved;
u32 *param;
-} __attribute__ ((packed)); /* XXX */
+} __packed; /* XXX */
struct cmdlog_host_cmd {
u8 cmd;
u8 len;
__le16 reserved;
char param[124];
-} __attribute__ ((packed));
+} __packed;
struct ipw_cmd_log {
unsigned long jiffies;
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 284b0e4cb815..4736861bc4f8 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -154,7 +154,7 @@ struct libipw_snap_hdr {
u8 ctrl; /* always 0x03 */
u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-} __attribute__ ((packed));
+} __packed;
#define SNAP_SIZE sizeof(struct libipw_snap_hdr)
@@ -323,7 +323,7 @@ struct libipw_security {
u8 keys[WEP_KEYS][SCM_KEY_LEN];
u8 level;
u16 flags;
-} __attribute__ ((packed));
+} __packed;
/*
@@ -347,7 +347,7 @@ struct libipw_hdr_1addr {
__le16 duration_id;
u8 addr1[ETH_ALEN];
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_hdr_2addr {
__le16 frame_ctl;
@@ -355,7 +355,7 @@ struct libipw_hdr_2addr {
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_hdr_3addr {
__le16 frame_ctl;
@@ -365,7 +365,7 @@ struct libipw_hdr_3addr {
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_hdr_4addr {
__le16 frame_ctl;
@@ -376,7 +376,7 @@ struct libipw_hdr_4addr {
__le16 seq_ctl;
u8 addr4[ETH_ALEN];
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_hdr_3addrqos {
__le16 frame_ctl;
@@ -387,13 +387,13 @@ struct libipw_hdr_3addrqos {
__le16 seq_ctl;
u8 payload[0];
__le16 qos_ctl;
-} __attribute__ ((packed));
+} __packed;
struct libipw_info_element {
u8 id;
u8 len;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/*
* These are the data types that can make up management packets
@@ -406,7 +406,7 @@ struct libipw_info_element {
u16 listen_interval;
struct {
u16 association_id:14, reserved:2;
- } __attribute__ ((packed));
+ } __packed;
u32 time_stamp[2];
u16 reason;
u16 status;
@@ -419,7 +419,7 @@ struct libipw_auth {
__le16 status;
/* challenge */
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_channel_switch {
u8 id;
@@ -427,7 +427,7 @@ struct libipw_channel_switch {
u8 mode;
u8 channel;
u8 count;
-} __attribute__ ((packed));
+} __packed;
struct libipw_action {
struct libipw_hdr_3addr header;
@@ -441,12 +441,12 @@ struct libipw_action {
struct libipw_channel_switch channel_switch;
} format;
-} __attribute__ ((packed));
+} __packed;
struct libipw_disassoc {
struct libipw_hdr_3addr header;
__le16 reason;
-} __attribute__ ((packed));
+} __packed;
/* Alias deauth for disassoc */
#define libipw_deauth libipw_disassoc
@@ -455,7 +455,7 @@ struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_probe_response {
struct libipw_hdr_3addr header;
@@ -465,7 +465,7 @@ struct libipw_probe_response {
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
/* Alias beacon for probe_response */
#define libipw_beacon libipw_probe_response
@@ -476,7 +476,7 @@ struct libipw_assoc_request {
__le16 listen_interval;
/* SSID, supported rates, RSN */
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_reassoc_request {
struct libipw_hdr_3addr header;
@@ -484,7 +484,7 @@ struct libipw_reassoc_request {
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_assoc_response {
struct libipw_hdr_3addr header;
@@ -493,7 +493,7 @@ struct libipw_assoc_response {
__le16 aid;
/* supported rates */
struct libipw_info_element info_element[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_txb {
u8 nr_frags;
@@ -555,19 +555,19 @@ struct libipw_qos_information_element {
u8 qui_subtype;
u8 version;
u8 ac_info;
-} __attribute__ ((packed));
+} __packed;
struct libipw_qos_ac_parameter {
u8 aci_aifsn;
u8 ecw_min_max;
__le16 tx_op_limit;
-} __attribute__ ((packed));
+} __packed;
struct libipw_qos_parameter_info {
struct libipw_qos_information_element info_element;
u8 reserved;
struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
+} __packed;
struct libipw_qos_parameters {
__le16 cw_min[QOS_QUEUE_NUM];
@@ -575,7 +575,7 @@ struct libipw_qos_parameters {
u8 aifs[QOS_QUEUE_NUM];
u8 flag[QOS_QUEUE_NUM];
__le16 tx_op_limit[QOS_QUEUE_NUM];
-} __attribute__ ((packed));
+} __packed;
struct libipw_qos_data {
struct libipw_qos_parameters parameters;
@@ -588,7 +588,7 @@ struct libipw_qos_data {
struct libipw_tim_parameters {
u8 tim_count;
u8 tim_period;
-} __attribute__ ((packed));
+} __packed;
/*******************************************************/
@@ -606,7 +606,7 @@ struct libipw_basic_report {
__le64 start_time;
__le16 duration;
u8 map;
-} __attribute__ ((packed));
+} __packed;
enum { /* libipw_measurement_request.mode */
/* Bit 0 is reserved */
@@ -627,7 +627,7 @@ struct libipw_measurement_params {
u8 channel;
__le64 start_time;
__le16 duration;
-} __attribute__ ((packed));
+} __packed;
struct libipw_measurement_request {
struct libipw_info_element ie;
@@ -635,7 +635,7 @@ struct libipw_measurement_request {
u8 mode;
u8 type;
struct libipw_measurement_params params[0];
-} __attribute__ ((packed));
+} __packed;
struct libipw_measurement_report {
struct libipw_info_element ie;
@@ -645,17 +645,17 @@ struct libipw_measurement_report {
union {
struct libipw_basic_report basic[0];
} u;
-} __attribute__ ((packed));
+} __packed;
struct libipw_tpc_report {
u8 transmit_power;
u8 link_margin;
-} __attribute__ ((packed));
+} __packed;
struct libipw_channel_map {
u8 channel;
u8 map;
-} __attribute__ ((packed));
+} __packed;
struct libipw_ibss_dfs {
struct libipw_info_element ie;
@@ -668,14 +668,14 @@ struct libipw_csa {
u8 mode;
u8 channel;
u8 count;
-} __attribute__ ((packed));
+} __packed;
struct libipw_quiet {
u8 count;
u8 period;
u8 duration;
u8 offset;
-} __attribute__ ((packed));
+} __packed;
struct libipw_network {
/* These entries are used to identify a unique network */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index dc8ed1527666..6491e27baac5 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -30,9 +30,11 @@ config IWLWIFI_DEBUG
config IWLWIFI_DEBUGFS
bool "iwlagn debugfs support"
- depends on IWLWIFI && IWLWIFI_DEBUG && MAC80211_DEBUGFS
+ depends on IWLWIFI && MAC80211_DEBUGFS
---help---
- Enable creation of debugfs files for the iwlwifi drivers.
+ Enable creation of debugfs files for the iwlwifi drivers. This
+ is a low-impact option that allows getting insight into the
+ driver's state at runtime.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 6be2992f8f21..dba91e0233b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -158,6 +158,8 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
index 6a9c64a50e36..ef0835b01b6b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -28,6 +28,28 @@
#include "iwl-3945-debugfs.h"
+
+static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(priv->_3945.statistics.flag));
+ if (le32_to_cpu(priv->_3945.statistics.flag) &
+ UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(priv->_3945.statistics.flag) &
+ UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(priv->_3945.statistics.flag) &
+ UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+ return p;
+}
+
ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -70,7 +92,7 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
max_cck = &priv->_3945.max_delta.rx.cck;
max_general = &priv->_3945.max_delta.rx.general;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_Rx - OFDM:");
@@ -331,7 +353,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
accum_tx = &priv->_3945.accum_statistics.tx;
delta_tx = &priv->_3945.delta_statistics.tx;
max_tx = &priv->_3945.max_delta.tx;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_Tx:");
@@ -438,7 +460,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
accum_div = &priv->_3945.accum_statistics.general.div;
delta_div = &priv->_3945.delta_statistics.general.div;
max_div = &priv->_3945.max_delta.general.div;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl3945_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_General:");
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 042f6bc0df13..2c9ed2b502a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -175,13 +175,13 @@
struct iwl3945_tfd_tb {
__le32 addr;
__le32 len;
-} __attribute__ ((packed));
+} __packed;
struct iwl3945_tfd {
__le32 control_flags;
struct iwl3945_tfd_tb tbs[4];
u8 __pad[28];
-} __attribute__ ((packed));
+} __packed;
#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 91bcb4e3cdfb..7c731a793632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -96,7 +96,7 @@ struct iwl3945_eeprom_txpower_sample {
u8 gain_index; /* index into power (gain) setup table ... */
s8 power; /* ... for this pwr level for this chnl group */
u16 v_det; /* PA output voltage */
-} __attribute__ ((packed));
+} __packed;
/*
* Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
@@ -117,7 +117,7 @@ struct iwl3945_eeprom_txpower_group {
u8 group_channel; /* "representative" channel # in this band */
s16 temperature; /* h/w temperature at factory calib this band
* (signed) */
-} __attribute__ ((packed));
+} __packed;
/*
* Temperature-based Tx-power compensation data, not band-specific.
@@ -131,7 +131,7 @@ struct iwl3945_eeprom_temperature_corr {
u32 Tc;
u32 Td;
u32 Te;
-} __attribute__ ((packed));
+} __packed;
/*
* EEPROM map
@@ -215,7 +215,7 @@ struct iwl3945_eeprom {
/* abs.ofs: 512 */
struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
u8 reserved16[172]; /* fill out to full 1024 byte block */
-} __attribute__ ((packed));
+} __packed;
#define IWL3945_EEPROM_IMG_SIZE 1024
@@ -274,7 +274,7 @@ static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
* and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
struct iwl3945_shared {
__le32 tx_base_ptr[8];
-} __attribute__ ((packed));
+} __packed;
static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index c44a303e62ed..93d513e14186 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -279,8 +279,8 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
- tx_info->skb[0] = NULL;
+ ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+ tx_info->skb = NULL;
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
@@ -315,7 +315,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
return;
}
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
ieee80211_tx_info_clear_status(info);
/* Fill the MRR chain with some info about on-chip retransmissions */
@@ -352,7 +352,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
* based on the assumption of all statistics counter are in DWORD
* FIXME: This function is for debugging, do not deal with
@@ -460,7 +460,7 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif
iwl_recover_from_statistics(priv, pkt);
@@ -475,7 +475,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
__le32 *flag = (__le32 *)&pkt->u.raw;
if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
memset(&priv->_3945.accum_statistics, 0,
sizeof(struct iwl3945_notif_statistics));
memset(&priv->_3945.delta_statistics, 0,
@@ -494,158 +494,6 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
* Misc. internal state and helper functions
*
******************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
-
-/**
- * iwl3945_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good frames.
- */
-static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- struct ieee80211_hdr *header, int group100)
-{
- u32 to_us;
- u32 print_summary = 0;
- u32 print_dump = 0; /* set to 1 to dump all frames' contents */
- u32 hundred = 0;
- u32 dataframe = 0;
- __le16 fc;
- u16 seq_ctl;
- u16 channel;
- u16 phy_flags;
- u16 length;
- u16 status;
- u16 bcn_tmr;
- u32 tsf_low;
- u64 tsf;
- u8 rssi;
- u8 agc;
- u16 sig_avg;
- u16 noise_diff;
- struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u8 *data = IWL_RX_DATA(pkt);
-
- /* MAC header */
- fc = header->frame_control;
- seq_ctl = le16_to_cpu(header->seq_ctrl);
-
- /* metadata */
- channel = le16_to_cpu(rx_hdr->channel);
- phy_flags = le16_to_cpu(rx_hdr->phy_flags);
- length = le16_to_cpu(rx_hdr->len);
-
- /* end-of-frame status and timestamp */
- status = le32_to_cpu(rx_end->status);
- bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
- tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
- tsf = le64_to_cpu(rx_end->timestamp);
-
- /* signal statistics */
- rssi = rx_stats->rssi;
- agc = rx_stats->agc;
- sig_avg = le16_to_cpu(rx_stats->sig_avg);
- noise_diff = le16_to_cpu(rx_stats->noise_diff);
-
- to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
- /* if data frame is to us and all is good,
- * (optionally) print summary for only 1 out of every 100 */
- if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
- cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
- dataframe = 1;
- if (!group100)
- print_summary = 1; /* print each frame */
- else if (priv->framecnt_to_us < 100) {
- priv->framecnt_to_us++;
- print_summary = 0;
- } else {
- priv->framecnt_to_us = 0;
- print_summary = 1;
- hundred = 1;
- }
- } else {
- /* print summary for all other frames */
- print_summary = 1;
- }
-
- if (print_summary) {
- char *title;
- int rate;
-
- if (hundred)
- title = "100Frames";
- else if (ieee80211_has_retry(fc))
- title = "Retry";
- else if (ieee80211_is_assoc_resp(fc))
- title = "AscRsp";
- else if (ieee80211_is_reassoc_resp(fc))
- title = "RasRsp";
- else if (ieee80211_is_probe_resp(fc)) {
- title = "PrbRsp";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_beacon(fc)) {
- title = "Beacon";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_atim(fc))
- title = "ATIM";
- else if (ieee80211_is_auth(fc))
- title = "Auth";
- else if (ieee80211_is_deauth(fc))
- title = "DeAuth";
- else if (ieee80211_is_disassoc(fc))
- title = "DisAssoc";
- else
- title = "Frame";
-
- rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rate == -1)
- rate = 0;
- else
- rate = iwl3945_rates[rate].ieee / 2;
-
- /* print frame summary.
- * MAC addresses show just the last byte (for brevity),
- * but you can hack it to show more, if you'd like to. */
- if (dataframe)
- IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
- title, le16_to_cpu(fc), header->addr1[5],
- length, rssi, channel, rate);
- else {
- /* src/dst addresses assume managed mode */
- IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, "
- "src=0x%02x, rssi=%u, tim=%lu usec, "
- "phy=0x%02x, chnl=%d\n",
- title, le16_to_cpu(fc), header->addr1[5],
- header->addr3[5], rssi,
- tsf_low - priv->scan_start_tsf,
- phy_flags, channel);
- }
- }
- if (print_dump)
- iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
-}
-
-static void iwl3945_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- struct ieee80211_hdr *header, int group100)
-{
- if (iwl_get_debug_level(priv) & IWL_DL_RX)
- _iwl3945_dbg_report_frame(priv, pkt, header, group100);
-}
-
-#else
-static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- struct ieee80211_hdr *header, int group100)
-{
-}
-#endif
/* This is necessary only for a number of statistics, see the caller. */
static int iwl3945_is_network_packet(struct iwl_priv *priv,
@@ -777,8 +625,6 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.signal, rx_status.signal,
rx_status.rate_idx);
- /* Set "1" to report good data frames in groups of 100 */
- iwl3945_dbg_report_frame(priv, pkt, header, 1);
iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
if (network_packet) {
@@ -850,25 +696,28 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
/* Unmap tx_cmd */
if (counter)
pci_unmap_single(dev,
- pci_unmap_addr(&txq->meta[index], mapping),
- pci_unmap_len(&txq->meta[index], len),
+ dma_unmap_addr(&txq->meta[index], mapping),
+ dma_unmap_len(&txq->meta[index], len),
PCI_DMA_TODEVICE);
/* unmap chunks if any */
- for (i = 1; i < counter; i++) {
+ for (i = 1; i < counter; i++)
pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
- if (txq->txb[txq->q.read_ptr].skb[0]) {
- struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
- if (txq->txb[txq->q.read_ptr].skb[0]) {
- /* Can be called from interrupt context */
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb[0] = NULL;
- }
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
}
}
- return ;
}
/**
@@ -947,8 +796,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
}
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags)
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
{
unsigned long flags_spin;
struct iwl_station_entry *station;
@@ -962,10 +810,9 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
station->sta.rate_n_flags = cpu_to_le16(tx_rate);
station->sta.mode = STA_CONTROL_MODIFY_MSK;
-
+ iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- iwl_send_add_sta(priv, &station->sta, flags);
IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
sta_id, tx_rate);
return sta_id;
@@ -997,7 +844,7 @@ static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
+ iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
@@ -2473,8 +2320,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
(priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
- CMD_ASYNC);
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
return 0;
@@ -2590,6 +2436,7 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
+ priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
return 0;
}
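[Editor's sketch, not part of the patch] One behavioural detail in the iwl3945_sync_sta() hunk above: iwl_send_add_sta() now runs inside the sta_lock critical section and always uses CMD_ASYNC instead of a caller-supplied flag. A plausible reading (the diff itself does not state the rationale): a synchronous host command waits for the firmware response and may sleep, which is not allowed while holding a spinlock with interrupts disabled, so only the asynchronous form is safe at that point. A sketch of the general pattern, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_priv {
            spinlock_t sta_lock;
            u16 rate[8];
    };

    /* Assumed to queue the command without sleeping (CMD_ASYNC-style). */
    void example_send_cmd_async(struct example_priv *priv, int sta_id);

    static void example_update_station(struct example_priv *priv, int sta_id,
                                       u16 rate)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->sta_lock, flags);
            priv->rate[sta_id] = rate;
            /* Deliberately asynchronous: a synchronous command could sleep,
             * which is forbidden under a spinlock with IRQs off. */
            example_send_cmd_async(priv, sta_id);
            spin_unlock_irqrestore(&priv->sta_lock, flags);
    }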
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index cd4b61ae25b7..9166794eda0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -787,6 +787,6 @@ enum {
struct iwl4965_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __attribute__ ((packed));
+} __packed;
#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d3afddae8d9f..83e6a42ca2da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -346,9 +346,19 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+ iwl_is_associated(priv)) {
struct iwl_calib_diff_gain_cmd cmd;
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = 0;
@@ -419,13 +429,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
/* Mark so we run this algo only once! */
data->state = IWL_CHAIN_NOISE_CALIBRATED;
}
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
}
static void iwl4965_bg_txpower_work(struct work_struct *work)
@@ -669,6 +672,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
priv->hw_params.sens = &iwl4965_sensitivity;
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
return 0;
}
@@ -1441,7 +1445,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
return ret;
}
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
{
int rc;
u8 band = 0;
@@ -1449,11 +1454,14 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
u8 ctrl_chan_high = 0;
struct iwl4965_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
-
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+ struct ieee80211_vif *vif = priv->vif;
band = priv->band == IEEE80211_BAND_2GHZ;
- ch_info = iwl_get_channel_info(priv, priv->band, channel);
-
is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
if (is_ht40 &&
@@ -1462,26 +1470,56 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
cmd.band = band;
cmd.expect_beacon = 0;
- cmd.channel = cpu_to_le16(channel);
+ ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+ cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
if (ch_info)
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- priv->active_rxon.channel, channel);
+ priv->active_rxon.channel, ch);
return -EFAULT;
}
- rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
+ rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
ctrl_chan_high, &cmd.tx_power);
if (rc) {
IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
return rc;
}
- priv->switch_rxon.channel = cpu_to_le16(channel);
+ priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
@@ -1870,7 +1908,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+ info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
@@ -2026,6 +2064,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
int sta_id;
int freed;
u8 *qc = NULL;
+ unsigned long flags;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -2035,7 +2074,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
return;
}
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
@@ -2050,10 +2089,10 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
return;
}
+ spin_lock_irqsave(&priv->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
struct iwl_ht_agg *agg = NULL;
-
WARN_ON(!qc);
agg = &priv->stations[sta_id].tid[tid].agg;
@@ -2110,6 +2149,8 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
iwl_check_abort_status(priv, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2285,7 +2326,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
*/
- .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
/* Module firmware */
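[Editor's sketch, not part of the patch] The channel-switch hunks for 4965 above (and the matching 5000/6000 hunks below) add the same calculation: turn the CSA countdown from ieee80211_channel_switch into a switch time expressed in the uCode's extended-beacon-time format. The following restatement is a sketch only and is not compilable on its own; iwl_usecs_to_beacons() and iwl_add_beacon_time() are the helpers the patch itself calls, TIME_UNIT is the driver's 802.11 time-unit constant in microseconds, and beacon_int_tu stands in for vif->bss_conf.beacon_int.

    /* Condensed restatement of the switch-time computation in these hunks. */
    static __le32 example_switch_time(struct iwl_priv *priv,
                                      struct ieee80211_channel_switch *ch_switch,
                                      u16 beacon_interval, u32 beacon_int_tu)
    {
            u8 switch_count = ch_switch->count;
            u32 tsf_low = ch_switch->timestamp & 0x0ffffffff;
            u32 usec, ucode_time;

            /* Discount beacons that already elapsed since the CSA timestamp. */
            if (priv->ucode_beacon_time > tsf_low && beacon_interval) {
                    u32 elapsed = (priv->ucode_beacon_time - tsf_low) /
                                  beacon_interval;

                    switch_count = (switch_count > elapsed) ?
                                    switch_count - elapsed : 0;
            }

            /* Countdown (nearly) over: switch at the current beacon time. */
            if (switch_count <= 1)
                    return cpu_to_le32(priv->ucode_beacon_time);

            /* Otherwise schedule the switch switch_count beacon intervals ahead. */
            usec = beacon_int_tu * switch_count * TIME_UNIT;
            ucode_time = iwl_usecs_to_beacons(priv, usec, beacon_interval);
            return iwl_add_beacon_time(priv, priv->ucode_beacon_time,
                                       ucode_time, beacon_interval);
    }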
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a28af7eb67eb..32710a801cb0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -208,6 +208,8 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ_PERD) |
BIT(IWL_CALIB_BASE_BAND);
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
return 0;
}
@@ -252,6 +254,8 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
return 0;
}
@@ -267,33 +271,69 @@ static void iwl5150_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
{
struct iwl5000_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+ struct ieee80211_vif *vif = priv->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = sizeof(cmd),
- .flags = CMD_SIZE_HUGE,
+ .flags = CMD_SYNC,
.data = &cmd,
};
- IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
- priv->active_rxon.channel, channel);
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
- cmd.channel = cpu_to_le16(channel);
+ ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+ IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+ priv->active_rxon.channel, ch);
+ cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- ch_info = iwl_get_channel_info(priv, priv->band, channel);
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
if (ch_info)
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- priv->active_rxon.channel, channel);
+ priv->active_rxon.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cpu_to_le16(channel);
+ priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
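Note: the switch-count adjustment added above (and repeated for the 6000 series below) reduces the AP's announced CSA count by the number of beacon intervals that have already elapsed between the CSA timestamp and the current uCode beacon time. A minimal standalone sketch of that arithmetic, with illustrative names and plain integers instead of the driver's little-endian fields:

#include <stdint.h>
#include <stdio.h>

/*
 * Reduce the announced switch count by the number of beacon intervals
 * already elapsed between the CSA timestamp (tsf_low) and the current
 * uCode beacon time. All values are assumed to share the same unit.
 */
static uint8_t remaining_switch_count(uint32_t ucode_beacon_time,
				      uint32_t tsf_low,
				      uint16_t beacon_interval,
				      uint8_t switch_count)
{
	uint32_t elapsed;

	if (ucode_beacon_time > tsf_low && beacon_interval) {
		elapsed = (ucode_beacon_time - tsf_low) / beacon_interval;
		switch_count = (switch_count > elapsed) ?
				switch_count - elapsed : 0;
	}
	return switch_count;
}

int main(void)
{
	/* 8 beacons announced, 5 intervals already elapsed -> 3 remain */
	printf("%u\n", remaining_switch_count(1000, 500, 100, 8));
	return 0;
}

A count of 0 or 1 then falls back to the current uCode beacon time, while larger counts are converted back into the firmware's beacon-time format, as the patch does above.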
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 9fbf54cd3e1a..afdeec56b13f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -71,6 +71,10 @@
#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
+#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -183,6 +187,8 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
return 0;
}
@@ -228,37 +234,74 @@ static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
return 0;
}
-static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
{
struct iwl6000_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
+ struct ieee80211_vif *vif = priv->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = sizeof(cmd),
- .flags = CMD_SIZE_HUGE,
+ .flags = CMD_SYNC,
.data = &cmd,
};
- IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
- priv->active_rxon.channel, channel);
-
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
- cmd.channel = cpu_to_le16(channel);
+ ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq);
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ priv->active_rxon.channel, ch);
+ cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- ch_info = iwl_get_channel_info(priv, priv->band, channel);
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factors for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
if (ch_info)
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- priv->active_rxon.channel, channel);
+ priv->active_rxon.channel, ch);
return -EFAULT;
}
- priv->switch_rxon.channel = cpu_to_le16(channel);
+ priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_sync(priv, &hcmd);
@@ -335,6 +378,25 @@ static const struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
+static void do_not_send_bt_config(struct iwl_priv *priv)
+{
+}
+
+static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
+ .set_tx_ant = iwlagn_send_tx_ant_config,
+ .send_bt_config = do_not_send_bt_config,
+};
+
+static const struct iwl_ops iwl6000g2b_ops = {
+ .lib = &iwl6000_lib,
+ .hcmd = &iwl6000g2b_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+};
+
static struct iwl_lib_ops iwl6050_lib = {
.set_hw_params = iwl6050_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
@@ -445,6 +507,268 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
.chain_noise_calib_by_driver = true,
};
+struct iwl_cfg iwl6000g2a_2abg_cfg = {
+ .name = "6000 Series 2x2 ABG Gen2a",
+ .fw_name_pre = IWL6000G2A_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G,
+ .ops = &iwl6000_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2a_2bg_cfg = {
+ .name = "6000 Series 2x2 BG Gen2a",
+ .fw_name_pre = IWL6000G2A_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_G,
+ .ops = &iwl6000_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_2agn_cfg = {
+ .name = "6000 Series 2x2 AGN Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_2abg_cfg = {
+ .name = "6000 Series 2x2 ABG Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_2bgn_cfg = {
+ .name = "6000 Series 2x2 BGN Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_2bg_cfg = {
+ .name = "6000 Series 2x2 BG Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_G,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_bgn_cfg = {
+ .name = "6000 Series 1x2 BGN Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_A,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
+struct iwl_cfg iwl6000g2b_bg_cfg = {
+ .name = "6000 Series 1x2 BG Gen2b",
+ .fw_name_pre = IWL6000G2B_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_G,
+ .ops = &iwl6000g2b_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_A,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+};
+
/*
* "i": Internal configuration, use internal Power Amplifier
*/
@@ -667,3 +991,4 @@ struct iwl_cfg iwl6000_3agn_cfg = {
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index 48c023b4ca36..3d08dc8af143 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -28,6 +28,27 @@
#include "iwl-agn-debugfs.h"
+static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(priv->statistics.flag));
+ if (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+ return p;
+}
+
ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -75,7 +96,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
max_general = &priv->max_delta.rx.general;
max_ht = &priv->max_delta.rx.ofdm_ht;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_Rx - OFDM:");
@@ -543,7 +564,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
accum_tx = &priv->accum_statistics.tx;
delta_tx = &priv->delta_statistics.tx;
max_tx = &priv->max_delta.tx;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_Tx:");
@@ -768,7 +789,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
accum_div = &priv->accum_statistics.general.div;
delta_div = &priv->delta_statistics.general.div;
max_div = &priv->max_delta.general.div;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += iwl_statistics_flag(priv, buf, bufsz);
pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
"acumulative delta max\n",
"Statistics_General:");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 44ef5d93befc..3f765ba15cb8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -37,7 +37,7 @@
#include "iwl-io.h"
#include "iwl-agn.h"
-static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
{
int ret = 0;
struct iwl5000_rxon_assoc_cmd rxon_assoc;
@@ -84,7 +84,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
return ret;
}
-static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
{
struct iwl_tx_ant_config_cmd tx_ant_cmd = {
.valid = cpu_to_le32(valid_tx_ant),
@@ -176,14 +176,6 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
data->radio_write = 1;
data->state = IWL_CHAIN_NOISE_CALIBRATED;
}
-
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
}
static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
@@ -191,10 +183,20 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+ iwl_is_associated(priv)) {
struct iwl_calib_chain_noise_reset_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index f9a3fbb6338f..a52b82c8e7a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -112,7 +112,7 @@
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __attribute__ ((packed));
+} __packed;
#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 0f292a210ed9..0e7b0661d61d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -77,7 +77,7 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+ info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
@@ -93,6 +93,12 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
} else {
/* Two or more frames were attempted; expect block-ack */
u64 bitmap = 0;
+
+ /*
+ * Start is the lowest frame sent. It may not be the first
+ * frame in the batch; we figure this out dynamically during
+ * the following loop.
+ */
int start = agg->start_idx;
/* Construct bit-map of pending frames within Tx window */
@@ -131,25 +137,58 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
i, idx, SEQ_TO_SN(sc));
+ /*
+ * sh -> how many frames ahead of the starting frame is
+ * the current one?
+ *
+ * Note that all frames sent in the batch must be in a
+ * 64-frame window, so this number should be in [0,63].
+ * If outside of this window, then we've found a new
+ * "first" frame in the batch and need to change start.
+ */
sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
+
+ /*
+ * If >= 64, out of window. start must be at the front
+ * of the circular buffer, idx must be near the end of
+ * the buffer, and idx is the new "first" frame. Shift
+ * the indices around.
+ */
+ if (sh >= 64) {
+ /* Shift bitmap by start - idx, wrapped */
+ sh = 0x100 - idx + start;
bitmap = bitmap << sh;
+ /* Now idx is the new start so sh = 0 */
sh = 0;
start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
+ /*
+ * If <= -64 then wraps the 256-pkt circular buffer
+ * (e.g., start = 255 and idx = 0, sh should be 1)
+ */
+ } else if (sh <= -64) {
+ sh = 0x100 - start + idx;
+ /*
+ * If < 0 but > -64, out of window. idx is before start
+ * but not wrapped. Shift the indices around.
+ */
+ } else if (sh < 0) {
+ /* Shift by how far start is ahead of idx */
sh = start - idx;
- start = idx;
bitmap = bitmap << sh;
+ /* Now idx is the new start so sh = 0 */
+ start = idx;
sh = 0;
}
+ /* Sequence number start + sh was sent in this batch */
bitmap |= 1ULL << sh;
IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
start, (unsigned long long)bitmap);
}
+ /*
+ * Store the bitmap and possibly the new start, if we wrapped
+ * the buffer above
+ */
agg->bitmap = bitmap;
agg->start_idx = start;
IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
@@ -184,6 +223,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
int tid;
int sta_id;
int freed;
+ unsigned long flags;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
@@ -193,15 +233,16 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
return;
}
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
+ spin_lock_irqsave(&priv->sta_lock, flags);
if (txq->sched_retry) {
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
+ struct iwl_ht_agg *agg;
agg = &priv->stations[sta_id].tid[tid].agg;
@@ -256,6 +297,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
iwl_check_abort_status(priv, tx_resp->frame_count, status);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
@@ -444,7 +486,7 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
/* Tell device where to find RBD circular buffer in DRAM */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->dma_addr >> 8));
+ (u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
@@ -709,7 +751,7 @@ void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
@@ -755,132 +797,6 @@ static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
-#ifdef CONFIG_IWLWIFI_DEBUG
-/**
- * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good data frames.
- * All beacon and probe response frames are printed.
- */
-static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_phy_res *phy_res, u16 length,
- struct ieee80211_hdr *header, int group100)
-{
- u32 to_us;
- u32 print_summary = 0;
- u32 print_dump = 0; /* set to 1 to dump all frames' contents */
- u32 hundred = 0;
- u32 dataframe = 0;
- __le16 fc;
- u16 seq_ctl;
- u16 channel;
- u16 phy_flags;
- u32 rate_n_flags;
- u32 tsf_low;
- int rssi;
-
- if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
- return;
-
- /* MAC header */
- fc = header->frame_control;
- seq_ctl = le16_to_cpu(header->seq_ctrl);
-
- /* metadata */
- channel = le16_to_cpu(phy_res->channel);
- phy_flags = le16_to_cpu(phy_res->phy_flags);
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* signal statistics */
- rssi = iwlagn_calc_rssi(priv, phy_res);
- tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
-
- to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
- /* if data frame is to us and all is good,
- * (optionally) print summary for only 1 out of every 100 */
- if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
- cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
- dataframe = 1;
- if (!group100)
- print_summary = 1; /* print each frame */
- else if (priv->framecnt_to_us < 100) {
- priv->framecnt_to_us++;
- print_summary = 0;
- } else {
- priv->framecnt_to_us = 0;
- print_summary = 1;
- hundred = 1;
- }
- } else {
- /* print summary for all other frames */
- print_summary = 1;
- }
-
- if (print_summary) {
- char *title;
- int rate_idx;
- u32 bitrate;
-
- if (hundred)
- title = "100Frames";
- else if (ieee80211_has_retry(fc))
- title = "Retry";
- else if (ieee80211_is_assoc_resp(fc))
- title = "AscRsp";
- else if (ieee80211_is_reassoc_resp(fc))
- title = "RasRsp";
- else if (ieee80211_is_probe_resp(fc)) {
- title = "PrbRsp";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_beacon(fc)) {
- title = "Beacon";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_atim(fc))
- title = "ATIM";
- else if (ieee80211_is_auth(fc))
- title = "Auth";
- else if (ieee80211_is_deauth(fc))
- title = "DeAuth";
- else if (ieee80211_is_disassoc(fc))
- title = "DisAssoc";
- else
- title = "Frame";
-
- rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
- if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
- bitrate = 0;
- WARN_ON_ONCE(1);
- } else {
- bitrate = iwl_rates[rate_idx].ieee / 2;
- }
-
- /* print frame summary.
- * MAC addresses show just the last byte (for brevity),
- * but you can hack it to show more, if you'd like to. */
- if (dataframe)
- IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
- title, le16_to_cpu(fc), header->addr1[5],
- length, rssi, channel, bitrate);
- else {
- /* src/dst addresses assume managed mode */
- IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
- "len=%u, rssi=%d, tim=%lu usec, "
- "phy=0x%02x, chnl=%d\n",
- title, le16_to_cpu(fc), header->addr1[5],
- header->addr3[5], length, rssi,
- tsf_low - priv->scan_start_tsf,
- phy_flags, channel);
- }
- }
- if (print_dump)
- iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
-}
-#endif
-
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
@@ -988,7 +904,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
- struct iwl4965_rx_mpdu_res_start *amsdu;
+ struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
@@ -1017,7 +933,7 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
return;
}
phy_res = &priv->_agn.last_phy_res;
- amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
@@ -1060,11 +976,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* Set "1" to report good data frames in groups of 100 */
- if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
- iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
-#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
rx_status.signal, (unsigned long long)rx_status.mactime);
@@ -1252,6 +1163,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
bool is_active = false;
int chan_mod;
u8 active_chains;
+ u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
conf = ieee80211_get_hw_conf(priv->hw);
@@ -1403,11 +1315,14 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (priv->cfg->scan_antennas[band])
- rx_ant = priv->cfg->scan_antennas[band];
+ if (priv->cfg->scan_rx_antennas[band])
+ rx_ant = priv->cfg->scan_rx_antennas[band];
- priv->scan_tx_ant[band] =
- iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
+ if (priv->cfg->scan_tx_antennas[band])
+ scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+ priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
+ scan_tx_antennas);
rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
@@ -1433,13 +1348,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (!priv->is_internal_short_scan) {
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
+ vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
} else {
+ /* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
- NULL, 0,
+ iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
}
@@ -1502,3 +1419,18 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
}
+
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ WARN_ON(!spin_is_locked(&priv->sta_lock));
+
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
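The helper added above guards the per-TID accounting under sta_lock; as a standalone sketch, the clamped decrement it performs is simply:

/*
 * Clamp the in-flight TFD counter at zero instead of letting it
 * underflow when more frames are reported freed than were queued.
 */
static unsigned int clamped_sub(unsigned int in_queue, unsigned int freed)
{
	return (in_queue >= freed) ? in_queue - freed : 0;
}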
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index cf4a95bae4ff..40933a5de027 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -313,8 +313,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
*/
IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
tid);
- ieee80211_stop_tx_ba_session(sta, tid,
- WLAN_BACK_INITIATOR);
+ ieee80211_stop_tx_ba_session(sta, tid);
}
} else
IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 7d614c4d3c62..84df7fca750d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -469,7 +469,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
/* Set up antennas */
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -567,10 +568,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
- if (!info->control.sta)
- sta_id = priv->hw_params.bcast_sta_id;
- else
- sta_id = iwl_sta_id(info->control.sta);
+ sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -598,11 +596,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
}
txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+
+ /* irqs already disabled/saved above when locking priv->lock */
+ spin_lock(&priv->sta_lock);
+
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&priv->sta_lock);
goto drop_unlock;
+ }
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
@@ -620,15 +624,22 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
swq_id = txq->swq_id;
q = &txq->q;
- if (unlikely(iwl_queue_space(q) < q->high_mark))
+ if (unlikely(iwl_queue_space(q) < q->high_mark)) {
+ spin_unlock(&priv->sta_lock);
goto drop_unlock;
+ }
- if (ieee80211_is_data_qos(fc))
+ if (ieee80211_is_data_qos(fc)) {
priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&priv->sta_lock);
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb[0] = skb;
+ txq->txb[q->write_ptr].skb = skb;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
@@ -694,8 +705,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
- pci_unmap_len_set(out_meta, len, len);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -703,8 +714,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
@@ -1009,6 +1018,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (ret)
return ret;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
@@ -1018,6 +1029,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
tid_data->tfds_in_queue);
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
}
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -1040,11 +1052,14 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
return -ENXIO;
}
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
if (priv->stations[sta_id].tid[tid].agg.state ==
IWL_EMPTYING_HW_QUEUE_ADDBA) {
IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
@@ -1062,13 +1077,17 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
priv->stations[sta_id].tid[tid].agg.state =
IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
IWL_DEBUG_HT(priv, "HW queue is empty\n");
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
- spin_lock_irqsave(&priv->lock, flags);
+ /* do not restore/save irqs */
+ spin_unlock(&priv->sta_lock);
+ spin_lock(&priv->lock);
+
/*
* the only reason this call can fail is queue number out of range,
* which can happen if uCode is reloaded and all the station
@@ -1092,6 +1111,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
u8 *addr = priv->stations[sta_id].sta.sta.addr;
struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+ WARN_ON(!spin_is_locked(&priv->sta_lock));
+
switch (priv->stations[sta_id].tid[tid].agg.state) {
case IWL_EMPTYING_HW_QUEUE_DELBA:
/* We are reclaiming the last packet of the */
@@ -1116,6 +1137,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
}
break;
}
+
return 0;
}
@@ -1159,12 +1181,12 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
- iwlagn_tx_status(priv, tx_info->skb[0]);
+ iwlagn_tx_status(priv, tx_info->skb);
- hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
if (hdr && ieee80211_is_data_qos(hdr->frame_control))
nfreed++;
- tx_info->skb[0] = NULL;
+ tx_info->skb = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
@@ -1188,7 +1210,7 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
int i, sh, ack;
u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- u64 bitmap;
+ u64 bitmap, sent_bitmap;
int successes = 0;
struct ieee80211_tx_info *info;
@@ -1216,24 +1238,26 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
/* check for success or failure according to the
* transmitted bitmap and block-ack bitmap */
- bitmap &= agg->bitmap;
+ sent_bitmap = bitmap & agg->bitmap;
/* For each frame attempted in aggregation,
* update driver's record of tx frame's status. */
- for (i = 0; i < agg->frame_count ; i++) {
- ack = bitmap & (1ULL << i);
- successes += !!ack;
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
}
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
memset(&info->status, 0, sizeof(info->status));
info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = successes;
- info->status.ampdu_ack_map = bitmap;
info->status.ampdu_len = agg->frame_count;
iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
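The reworked ACK accounting above walks only the bits that were both transmitted and acknowledged, rather than iterating over a fixed frame count. A standalone sketch of that loop (illustrative, not the driver code):

#include <stdint.h>

/*
 * Count acknowledged frames: intersect the block-ack bitmap with the
 * bitmap of frames actually sent, then shift right until empty.
 */
static int count_acked(uint64_t ba_bitmap, uint64_t agg_bitmap)
{
	uint64_t sent_bitmap = ba_bitmap & agg_bitmap;
	int successes = 0;

	while (sent_bitmap) {
		successes += (int)(sent_bitmap & 1ULL);
		sent_bitmap >>= 1;
	}
	return successes;
}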
@@ -1281,6 +1305,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
int index;
int sta_id;
int tid;
+ unsigned long flags;
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -1308,7 +1333,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
/* Find index just before block-ack window */
index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
- /* TODO: Need to get this copy more safely - now good for debug */
+ spin_lock_irqsave(&priv->sta_lock, flags);
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
"sta_id = %d\n",
@@ -1344,4 +1369,6 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
}
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 637286c396fe..6f77441cb65a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -423,3 +423,126 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
return 0;
}
+
+
+/**
+ * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ i + IWLAGN_RTC_INST_LOWER_BOUND);
+ val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+ u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IWL_ERR(priv, "uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ IWL_DEBUG_INFO(priv,
+ "ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * iwl_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int iwl_verify_ucode(struct iwl_priv *priv)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwlcore_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *)priv->ucode_init.v_addr;
+ len = priv->ucode_init.len;
+ ret = iwlcore_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *)priv->ucode_code.v_addr;
+ len = priv->ucode_code.len;
+ ret = iwlcore_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwl_verify_inst_full(priv, image, len);
+
+ return ret;
+}
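The verification helpers added above use a sample-first strategy: check one word every 100 bytes, and only fall back to a full, verbose word-by-word compare if sampling fails. A self-contained sketch of the sparse pass over plain buffers (the device SRAM reads are replaced by an array; names are illustrative):

#include <stdint.h>
#include <stddef.h>

/*
 * Compare the host image against the (already read back) device image
 * at 100-byte intervals; give up after three mismatches, as above.
 */
static int verify_sparse(const uint32_t *host, const uint32_t *dev,
			 size_t len_bytes)
{
	size_t i;
	unsigned int errcnt = 0;

	for (i = 0; i < len_bytes; i += 100) {
		if (dev[i / sizeof(uint32_t)] != host[i / sizeof(uint32_t)]) {
			if (++errcnt >= 3)
				break;
		}
	}
	return errcnt ? -1 : 0;
}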
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24aff654fa9c..d857f8496f69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -120,7 +120,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
(priv->switch_rxon.channel != priv->staging_rxon.channel)) {
IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
le16_to_cpu(priv->switch_rxon.channel));
- priv->switch_rxon.switch_in_progress = false;
+ iwl_chswitch_done(priv, false);
}
/* If we don't need to send a full RXON, we can use
@@ -367,7 +367,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
/* Set up packet rate and flags */
rate = iwl_rate_get_lowest_plcp(priv);
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK;
@@ -474,18 +475,25 @@ void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
/* Unmap tx_cmd */
if (num_tbs)
pci_unmap_single(dev,
- pci_unmap_addr(&txq->meta[index], mapping),
- pci_unmap_len(&txq->meta[index], len),
+ dma_unmap_addr(&txq->meta[index], mapping),
+ dma_unmap_len(&txq->meta[index], len),
PCI_DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++) {
+ for (i = 1; i < num_tbs; i++)
pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
- if (txq->txb) {
- dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
- txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
}
}
}
@@ -933,6 +941,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
fill_rx = 1;
while (i != r) {
+ int len;
+
rxb = rxq->queue[i];
/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -947,8 +957,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
PCI_DMA_FROMDEVICE);
pkt = rxb_addr(rxb);
- trace_iwlwifi_dev_rx(priv, pkt,
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1466,7 +1477,12 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
" expected_ack_cnt = %d\n",
actual_ack_cnt_delta, expected_ack_cnt_delta);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /*
+ * This is ifdef'ed on DEBUGFS because otherwise the
+ * statistics aren't available. If DEBUGFS is set but
+ * DEBUG is not, these will just compile out.
+ */
IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
priv->delta_statistics.tx.rx_detected_cnt);
IWL_DEBUG_RADIO(priv,
@@ -1694,6 +1710,9 @@ struct iwlagn_firmware_pieces {
size_t inst_size, data_size, init_size, init_data_size, boot_size;
u32 build;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
@@ -1871,6 +1890,42 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
capa->max_probe_length =
le32_to_cpup((__le32 *)tlv_data);
break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->init_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->init_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->init_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->inst_evtlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->inst_evtlog_size =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
+ if (tlv_len != 4)
+ return -EINVAL;
+ pieces->inst_errlog_ptr =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
default:
break;
}
@@ -2063,6 +2118,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto err_pci_alloc;
}
+ /* Now that we can no longer fail, copy information */
+
+ /*
+ * The (size - 16) / 12 formula is based on the information recorded
+ * for each event, which is of mode 1 (including timestamp) for all
+ * new microcodes that include this information.
+ */
+ priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
+ if (pieces.init_evtlog_size)
+ priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ else
+ priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size;
+ priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
+ priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
+ if (pieces.inst_evtlog_size)
+ priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ else
+ priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size;
+ priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
+
/* Copy images into buffers for card's bus-master reads ... */
/* Runtime instructions (first block of data in file) */
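A note on the (size - 16) / 12 conversion above: the event log begins with a four-word header (capacity, mode, num_wraps, write pointer, as the event-log dump code in this patch reads them) and each mode-1 entry occupies three words, so a byte size maps to an entry count as sketched below (names are illustrative):

#include <stdint.h>

/*
 * Convert an event-log byte size (mode 1) to an entry count, falling
 * back to the per-device default when the TLV did not supply a size.
 */
static uint32_t evtlog_entries(uint32_t byte_size, uint32_t max_default)
{
	return byte_size ? (byte_size - 16) / 12 : max_default;
}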
@@ -2195,10 +2270,15 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 blink1, blink2, ilink1, ilink2;
u32 pc, hcmd;
- if (priv->ucode_type == UCODE_INIT)
+ if (priv->ucode_type == UCODE_INIT) {
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- else
+ if (!base)
+ base = priv->_agn.init_errlog_ptr;
+ } else {
base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+ if (!base)
+ base = priv->_agn.inst_errlog_ptr;
+ }
if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
IWL_ERR(priv,
@@ -2258,10 +2338,16 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
if (num_events == 0)
return pos;
- if (priv->ucode_type == UCODE_INIT)
+
+ if (priv->ucode_type == UCODE_INIT) {
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- else
+ if (!base)
+ base = priv->_agn.init_evtlog_ptr;
+ } else {
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (!base)
+ base = priv->_agn.inst_evtlog_ptr;
+ }
if (mode == 0)
event_size = 2 * sizeof(u32);
@@ -2363,13 +2449,21 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ u32 logsize;
int pos = 0;
size_t bufsz = 0;
- if (priv->ucode_type == UCODE_INIT)
+ if (priv->ucode_type == UCODE_INIT) {
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- else
+ logsize = priv->_agn.init_evtlog_size;
+ if (!base)
+ base = priv->_agn.init_evtlog_ptr;
+ } else {
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ logsize = priv->_agn.inst_evtlog_size;
+ if (!base)
+ base = priv->_agn.inst_evtlog_ptr;
+ }
if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
IWL_ERR(priv,
@@ -2384,16 +2478,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > priv->cfg->max_event_log_size) {
+ if (capacity > logsize) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, priv->cfg->max_event_log_size);
- capacity = priv->cfg->max_event_log_size;
+ capacity, logsize);
+ capacity = logsize;
}
- if (next_entry > priv->cfg->max_event_log_size) {
+ if (next_entry > logsize) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, priv->cfg->max_event_log_size);
- next_entry = priv->cfg->max_event_log_size;
+ next_entry, logsize);
+ next_entry = logsize;
}
size = num_wraps ? capacity : next_entry;
@@ -2518,8 +2612,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
- memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
}
/* Configure Bluetooth device coexistence support */
@@ -2934,20 +3026,16 @@ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
iwlcore_commit_rxon(priv);
@@ -3173,8 +3261,7 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->staging_rxon.assoc_id = 0;
- if (vif->bss_conf.assoc_capability &
- WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -3182,17 +3269,12 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->staging_rxon.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3238,17 +3320,9 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (sta) {
- sta_id = iwl_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- sta->addr);
- return -EINVAL;
- }
- } else {
- sta_id = priv->hw_params.bcast_sta_id;
- }
+ sta_id = iwl_sta_id_or_broadcast(priv, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
@@ -3423,6 +3497,98 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
return 0;
}
+static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ u16 ch;
+ unsigned long flags = 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwl_is_rfkill(priv))
+ goto out_exit;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status))
+ goto out_exit;
+
+ if (!iwl_is_associated(priv))
+ goto out_exit;
+
+ /* channel switch in progress */
+ if (priv->switch_rxon.switch_in_progress == true)
+ goto out_exit;
+
+ mutex_lock(&priv->mutex);
+ if (priv->cfg->ops->lib->set_channel_switch) {
+
+ ch = ieee80211_frequency_to_channel(
+ ch_switch->channel->center_freq);
+ if (le16_to_cpu(priv->active_rxon.channel) != ch) {
+ ch_info = iwl_get_channel_info(priv,
+ conf->channel->band,
+ ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ht_conf->is_ht = conf_is_ht(conf);
+ if (ht_conf->is_ht) {
+ if (conf_is_ht40_minus(conf)) {
+ ht_conf->extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ht_conf->is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ht_conf->extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ht_conf->is_40mhz = true;
+ } else {
+ ht_conf->extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ht_conf->is_40mhz = false;
+ }
+ } else
+ ht_conf->is_40mhz = false;
+
+ /* if we are switching from ht to 2.4 clear flags
+ * from any ht related info since 2.4 does not
+ * support ht */
+ if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
+ priv->staging_rxon.flags = 0;
+
+ iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_rxon_ht(priv, ht_conf);
+ iwl_set_flags_for_band(priv, conf->channel->band,
+ priv->vif);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ if (priv->cfg->ops->lib->set_channel_switch(priv,
+ ch_switch))
+ priv->switch_rxon.switch_in_progress = false;
+ }
+ }
+out:
+ mutex_unlock(&priv->mutex);
+out_exit:
+ if (!priv->switch_rxon.switch_in_progress)
+ ieee80211_chswitch_done(priv->vif, false);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
/*****************************************************************************
*
* driver setup and teardown
@@ -3479,6 +3645,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->scan_check);
cancel_work_sync(&priv->start_internal_scan);
cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
@@ -3594,6 +3761,7 @@ static struct ieee80211_ops iwl_hw_ops = {
.sta_notify = iwl_mac_sta_notify,
.sta_add = iwlagn_mac_sta_add,
.sta_remove = iwl_mac_sta_remove,
+ .channel_switch = iwl_mac_channel_switch,
};
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3604,6 +3772,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
unsigned long flags;
u16 pci_cmd;
+ u8 perm_addr[ETH_ALEN];
/************************
* 1. Allocating HW data
@@ -3633,9 +3802,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->pci_dev = pdev;
priv->inta_mask = CSR_INI_SET_MASK;
-#ifdef CONFIG_IWLWIFI_DEBUG
- atomic_set(&priv->restrict_refcnt, 0);
-#endif
if (iwl_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
@@ -3724,9 +3890,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->mac_addr);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
- SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
+ iwl_eeprom_get_mac(priv, perm_addr);
+ IWL_DEBUG_INFO(priv, "MAC address: %pM\n", perm_addr);
+ SET_IEEE80211_PERM_ADDR(priv->hw, perm_addr);
/************************
* 5. Setup HW constants
@@ -3993,6 +4159,47 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
+
+/* 6x00 Series Gen2b */
+ {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 2d748053358e..5c32777b0a49 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,33 @@
#include "iwl-dev.h"
+/* configuration for the _agn devices */
+extern struct iwl_cfg iwl4965_agn_cfg;
+extern struct iwl_cfg iwl5300_agn_cfg;
+extern struct iwl_cfg iwl5100_agn_cfg;
+extern struct iwl_cfg iwl5350_agn_cfg;
+extern struct iwl_cfg iwl5100_bgn_cfg;
+extern struct iwl_cfg iwl5100_abg_cfg;
+extern struct iwl_cfg iwl5150_agn_cfg;
+extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2agn_cfg;
+extern struct iwl_cfg iwl6000g2a_2abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2bg_cfg;
+extern struct iwl_cfg iwl6000g2b_bgn_cfg;
+extern struct iwl_cfg iwl6000g2b_bg_cfg;
+extern struct iwl_cfg iwl6000g2b_2agn_cfg;
+extern struct iwl_cfg iwl6000g2b_2abg_cfg;
+extern struct iwl_cfg iwl6000g2b_2bgn_cfg;
+extern struct iwl_cfg iwl6000g2b_2bg_cfg;
+extern struct iwl_cfg iwl6000i_2agn_cfg;
+extern struct iwl_cfg iwl6000i_2abg_cfg;
+extern struct iwl_cfg iwl6000i_2bg_cfg;
+extern struct iwl_cfg iwl6000_3agn_cfg;
+extern struct iwl_cfg iwl6050_2agn_cfg;
+extern struct iwl_cfg iwl6050_2abg_cfg;
+extern struct iwl_cfg iwl1000_bgn_cfg;
+extern struct iwl_cfg iwl1000_bg_cfg;
+
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_hcmd_ops iwlagn_hcmd;
extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
@@ -93,6 +120,8 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo);
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
/* uCode */
int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -102,6 +131,7 @@ void iwlagn_rx_calib_complete(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_init_alive_start(struct iwl_priv *priv);
int iwlagn_alive_notify(struct iwl_priv *priv);
+int iwl_verify_ucode(struct iwl_priv *priv);
/* lib */
void iwl_check_abort_status(struct iwl_priv *priv,
@@ -178,4 +208,8 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
+/* hcmd */
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv);
+int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
+
#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 7e8227773213..22fa947e8756 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -846,6 +846,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
}
}
+ if (active_chains != priv->hw_params.valid_rx_ant &&
+ active_chains != priv->chain_noise_data.active_chains)
+ IWL_WARN(priv,
+ "Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n",
+ active_chains, priv->hw_params.valid_rx_ant);
+
/* Save for use within RXON, TX, SCAN commands, etc. */
priv->chain_noise_data.active_chains = active_chains;
IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9aab020c474b..28b1098334f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -95,7 +95,7 @@ enum {
/* Multi-Station support */
REPLY_ADD_STA = 0x18,
- REPLY_REMOVE_STA = 0x19, /* not used */
+ REPLY_REMOVE_STA = 0x19,
REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
/* Security */
@@ -227,7 +227,7 @@ struct iwl_cmd_header {
/* command or response/notification data follows immediately */
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/**
@@ -247,7 +247,7 @@ struct iwl_cmd_header {
struct iwl3945_tx_power {
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
-} __attribute__ ((packed));
+} __packed;
/**
* struct iwl3945_power_per_rate
@@ -258,7 +258,7 @@ struct iwl3945_power_per_rate {
u8 rate; /* plcp */
struct iwl3945_tx_power tpc;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
/**
* iwlagn rate_n_flags bit fields
@@ -389,7 +389,7 @@ union iwl4965_tx_power_dual_stream {
*/
struct tx_power_dual_stream {
__le32 dw;
-} __attribute__ ((packed));
+} __packed;
/**
* struct iwl4965_tx_power_db
@@ -398,7 +398,7 @@ struct tx_power_dual_stream {
*/
struct iwl4965_tx_power_db {
struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
/**
* Command REPLY_TX_POWER_DBM_CMD = 0x98
@@ -412,7 +412,7 @@ struct iwl5000_tx_power_dbm_cmd {
u8 flags;
s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
/**
* Command TX_ANT_CONFIGURATION_CMD = 0x98
@@ -422,7 +422,7 @@ struct iwl5000_tx_power_dbm_cmd {
*/
struct iwl_tx_ant_config_cmd {
__le32 valid;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (0a)
@@ -478,7 +478,7 @@ struct iwl_init_alive_resp {
__le32 therm_r4[2]; /* signed */
__le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
* 2 Tx chains */
-} __attribute__ ((packed));
+} __packed;
/**
@@ -570,7 +570,7 @@ struct iwl_alive_resp {
__le32 error_event_table_ptr; /* SRAM address for error log */
__le32 timestamp;
__le32 is_valid;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_ERROR = 0x2 (response only, not a command)
@@ -582,7 +582,7 @@ struct iwl_error_resp {
__le16 bad_cmd_seq_num;
__le32 error_info;
__le64 timestamp;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (1)
@@ -718,7 +718,7 @@ struct iwl3945_rxon_cmd {
__le32 filter_flags;
__le16 channel;
__le16 reserved5;
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_rxon_cmd {
u8 node_addr[6];
@@ -738,7 +738,7 @@ struct iwl4965_rxon_cmd {
__le16 channel;
u8 ofdm_ht_single_stream_basic_rates;
u8 ofdm_ht_dual_stream_basic_rates;
-} __attribute__ ((packed));
+} __packed;
/* 5000 HW just extend this command */
struct iwl_rxon_cmd {
@@ -763,7 +763,7 @@ struct iwl_rxon_cmd {
u8 reserved5;
__le16 acquisition_data;
__le16 reserved6;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
@@ -774,7 +774,7 @@ struct iwl3945_rxon_assoc_cmd {
u8 ofdm_basic_rates;
u8 cck_basic_rates;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_rxon_assoc_cmd {
__le32 flags;
@@ -785,7 +785,7 @@ struct iwl4965_rxon_assoc_cmd {
u8 ofdm_ht_dual_stream_basic_rates;
__le16 rx_chain_select_flags;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwl5000_rxon_assoc_cmd {
__le32 flags;
@@ -800,7 +800,7 @@ struct iwl5000_rxon_assoc_cmd {
__le16 rx_chain_select_flags;
__le16 acquisition_data;
__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
@@ -816,7 +816,7 @@ struct iwl_rxon_time_cmd {
__le32 beacon_init_val;
__le16 listen_interval;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
@@ -829,7 +829,7 @@ struct iwl3945_channel_switch_cmd {
__le32 rxon_filter_flags;
__le32 switch_time;
struct iwl3945_power_per_rate power[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_channel_switch_cmd {
u8 band;
@@ -839,7 +839,7 @@ struct iwl4965_channel_switch_cmd {
__le32 rxon_filter_flags;
__le32 switch_time;
struct iwl4965_tx_power_db tx_power;
-} __attribute__ ((packed));
+} __packed;
/**
* struct iwl5000_channel_switch_cmd
@@ -860,7 +860,7 @@ struct iwl5000_channel_switch_cmd {
__le32 rxon_filter_flags;
__le32 switch_time;
__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
/**
* struct iwl6000_channel_switch_cmd
@@ -881,7 +881,7 @@ struct iwl6000_channel_switch_cmd {
__le32 rxon_filter_flags;
__le32 switch_time;
__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
-} __attribute__ ((packed));
+} __packed;
/*
* CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
@@ -890,7 +890,7 @@ struct iwl_csa_notification {
__le16 band;
__le16 channel;
__le32 status; /* 0 - OK, 1 - fail */
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (2)
@@ -920,7 +920,7 @@ struct iwl_ac_qos {
u8 aifsn;
u8 reserved1;
__le16 edca_txop;
-} __attribute__ ((packed));
+} __packed;
/* QoS flags defines */
#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
@@ -939,7 +939,7 @@ struct iwl_ac_qos {
struct iwl_qosparam_cmd {
__le32 qos_flags;
struct iwl_ac_qos ac[AC_NUM];
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (3)
@@ -952,7 +952,6 @@ struct iwl_qosparam_cmd {
/* Special, dedicated locations within device's station table */
#define IWL_AP_ID 0
-#define IWL_MULTICAST_ID 1
#define IWL_STA_ID 2
#define IWL3945_BROADCAST_ID 24
#define IWL3945_STATION_COUNT 25
@@ -1015,7 +1014,7 @@ struct iwl4965_keyinfo {
u8 key_offset;
u8 reserved2;
u8 key[16]; /* 16-byte unicast decryption key */
-} __attribute__ ((packed));
+} __packed;
/* 5000 */
struct iwl_keyinfo {
@@ -1029,7 +1028,7 @@ struct iwl_keyinfo {
__le64 tx_secur_seq_cnt;
__le64 hw_tkip_mic_rx_key;
__le64 hw_tkip_mic_tx_key;
-} __attribute__ ((packed));
+} __packed;
/**
* struct sta_id_modify
@@ -1049,7 +1048,7 @@ struct sta_id_modify {
u8 sta_id;
u8 modify_mask;
__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_ADD_STA = 0x18 (command)
@@ -1103,7 +1102,7 @@ struct iwl3945_addsta_cmd {
/* Starting Sequence Number for added block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
__le16 add_immediate_ba_ssn;
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
@@ -1140,7 +1139,7 @@ struct iwl4965_addsta_cmd {
__le16 sleep_tx_count;
__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
/* 5000 */
struct iwl_addsta_cmd {
@@ -1178,7 +1177,7 @@ struct iwl_addsta_cmd {
__le16 sleep_tx_count;
__le16 reserved2;
-} __attribute__ ((packed));
+} __packed;
#define ADD_STA_SUCCESS_MSK 0x1
@@ -1190,7 +1189,7 @@ struct iwl_addsta_cmd {
*/
struct iwl_add_sta_resp {
u8 status; /* ADD_STA_* */
-} __attribute__ ((packed));
+} __packed;
#define REM_STA_SUCCESS_MSK 0x1
/*
@@ -1198,7 +1197,7 @@ struct iwl_add_sta_resp {
*/
struct iwl_rem_sta_resp {
u8 status;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_REM_STA = 0x19 (command)
@@ -1208,7 +1207,7 @@ struct iwl_rem_sta_cmd {
u8 reserved[3];
u8 addr[ETH_ALEN]; /* MAC addr of the first station */
u8 reserved2[2];
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_WEP_KEY = 0x20
@@ -1220,7 +1219,7 @@ struct iwl_wep_key {
u8 key_size;
u8 reserved2[3];
u8 key[16];
-} __attribute__ ((packed));
+} __packed;
struct iwl_wep_cmd {
u8 num_keys;
@@ -1228,7 +1227,7 @@ struct iwl_wep_cmd {
u8 flags;
u8 reserved;
struct iwl_wep_key key[0];
-} __attribute__ ((packed));
+} __packed;
#define WEP_KEY_WEP_TYPE 1
#define WEP_KEYS_MAX 4
@@ -1282,7 +1281,7 @@ struct iwl3945_rx_frame_stats {
__le16 sig_avg;
__le16 noise_diff;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct iwl3945_rx_frame_hdr {
__le16 channel;
@@ -1291,13 +1290,13 @@ struct iwl3945_rx_frame_hdr {
u8 rate;
__le16 len;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct iwl3945_rx_frame_end {
__le32 status;
__le64 timestamp;
__le32 beacon_timestamp;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_3945_RX = 0x1b (response only, not a command)
@@ -1311,7 +1310,7 @@ struct iwl3945_rx_frame {
struct iwl3945_rx_frame_stats stats;
struct iwl3945_rx_frame_hdr hdr;
struct iwl3945_rx_frame_end end;
-} __attribute__ ((packed));
+} __packed;
#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
@@ -1327,7 +1326,7 @@ struct iwl4965_rx_non_cfg_phy {
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
-} __attribute__ ((packed));
+} __packed;
#define IWL50_RX_RES_PHY_CNT 8
@@ -1345,7 +1344,7 @@ struct iwl4965_rx_non_cfg_phy {
struct iwl5000_non_cfg_phy {
__le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */
-} __attribute__ ((packed));
+} __packed;
/*
@@ -1365,12 +1364,12 @@ struct iwl_rx_phy_res {
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 reserved3;
-} __attribute__ ((packed));
+} __packed;
-struct iwl4965_rx_mpdu_res_start {
+struct iwl_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
@@ -1557,7 +1556,7 @@ struct iwl3945_tx_cmd {
*/
u8 payload[0];
struct ieee80211_hdr hdr[0];
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_TX = 0x1c (response)
@@ -1569,7 +1568,7 @@ struct iwl3945_tx_resp {
u8 rate;
__le32 wireless_media_time;
__le32 status; /* TX status */
-} __attribute__ ((packed));
+} __packed;
/*
@@ -1581,7 +1580,7 @@ struct iwl_dram_scratch {
u8 try_cnt; /* Tx attempts */
u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwl_tx_cmd {
/*
@@ -1660,7 +1659,7 @@ struct iwl_tx_cmd {
*/
u8 payload[0];
struct ieee80211_hdr hdr[0];
-} __attribute__ ((packed));
+} __packed;
/* TX command response is sent after *3945* transmission attempts.
*
@@ -1826,7 +1825,7 @@ enum {
struct agg_tx_status {
__le16 status;
__le16 sequence;
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_tx_resp {
u8 frame_count; /* 1 no aggregation, >1 aggregation */
@@ -1863,7 +1862,7 @@ struct iwl4965_tx_resp {
__le32 status;
struct agg_tx_status agg_status[0]; /* for each agg frame */
} u;
-} __attribute__ ((packed));
+} __packed;
/*
* definitions for initial rate index field
@@ -1927,7 +1926,7 @@ struct iwl5000_tx_resp {
*/
struct agg_tx_status status; /* TX status (in aggregation -
* status of 1st frame) */
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
*
@@ -1945,7 +1944,7 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
@@ -1958,14 +1957,14 @@ struct iwl3945_txpowertable_cmd {
u8 reserved;
__le16 channel;
struct iwl3945_power_per_rate power[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
struct iwl4965_tx_power_db tx_power;
-} __attribute__ ((packed));
+} __packed;
/**
@@ -1987,13 +1986,13 @@ struct iwl3945_rate_scaling_info {
__le16 rate_n_flags;
u8 try_cnt;
u8 next_rate_index;
-} __attribute__ ((packed));
+} __packed;
struct iwl3945_rate_scaling_cmd {
u8 table_id;
u8 reserved[3];
struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
@@ -2040,7 +2039,7 @@ struct iwl_link_qual_general_params {
* TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
*/
u8 start_rate_index[LINK_QUAL_AC_NUM];
-} __attribute__ ((packed));
+} __packed;
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535)
@@ -2081,7 +2080,7 @@ struct iwl_link_qual_agg_params {
u8 agg_frame_cnt_limit;
__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
@@ -2287,7 +2286,7 @@ struct iwl_link_quality_cmd {
__le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
} rs_table[LINK_QUAL_MAX_RETRY_NUM];
__le32 reserved2;
-} __attribute__ ((packed));
+} __packed;
/*
* BT configuration enable flags:
@@ -2328,7 +2327,7 @@ struct iwl_bt_cmd {
u8 reserved;
__le32 kill_ack_mask;
__le32 kill_cts_mask;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (6)
@@ -2353,7 +2352,7 @@ struct iwl_measure_channel {
u8 channel; /* channel to measure */
u8 type; /* see enum iwl_measure_type */
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
@@ -2372,7 +2371,7 @@ struct iwl_spectrum_cmd {
__le16 channel_count; /* minimum 1, maximum 10 */
__le16 reserved3;
struct iwl_measure_channel channels[10];
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
@@ -2383,7 +2382,7 @@ struct iwl_spectrum_resp {
__le16 status; /* 0 - command will be handled
* 1 - cannot handle (conflicts with another
* measurement) */
-} __attribute__ ((packed));
+} __packed;
enum iwl_measurement_state {
IWL_MEASUREMENT_START = 0,
@@ -2406,13 +2405,13 @@ enum iwl_measurement_status {
struct iwl_measurement_histogram {
__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
-} __attribute__ ((packed));
+} __packed;
/* clear channel availability counters */
struct iwl_measurement_cca_counters {
__le32 ofdm;
__le32 cck;
-} __attribute__ ((packed));
+} __packed;
enum iwl_measure_type {
IWL_MEASURE_BASIC = (1 << 0),
@@ -2448,7 +2447,7 @@ struct iwl_spectrum_notification {
struct iwl_measurement_histogram histogram;
__le32 stop_time; /* lower 32-bits of TSF */
__le32 status; /* see iwl_measurement_status */
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (7)
@@ -2504,7 +2503,7 @@ struct iwl3945_powertable_cmd {
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct iwl_powertable_cmd {
__le16 flags;
@@ -2514,7 +2513,7 @@ struct iwl_powertable_cmd {
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
__le32 keep_alive_beacons;
-} __attribute__ ((packed));
+} __packed;
/*
* PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
@@ -2527,7 +2526,7 @@ struct iwl_sleep_notification {
__le32 sleep_time;
__le32 tsf_low;
__le32 bcon_timer;
-} __attribute__ ((packed));
+} __packed;
/* Sleep states. 3945 and 4965 identical. */
enum {
@@ -2552,14 +2551,14 @@ enum {
#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
struct iwl_card_state_cmd {
__le32 status; /* CARD_STATE_CMD_* request new power state */
-} __attribute__ ((packed));
+} __packed;
/*
* CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
*/
struct iwl_card_state_notif {
__le32 flags;
-} __attribute__ ((packed));
+} __packed;
#define HW_CARD_DISABLED 0x01
#define SW_CARD_DISABLED 0x02
@@ -2570,14 +2569,14 @@ struct iwl_ct_kill_config {
__le32 reserved;
__le32 critical_temperature_M;
__le32 critical_temperature_R;
-} __attribute__ ((packed));
+} __packed;
/* 1000, and 6x00 */
struct iwl_ct_kill_throttling_config {
__le32 critical_temperature_exit;
__le32 reserved;
__le32 critical_temperature_enter;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (8)
@@ -2622,7 +2621,7 @@ struct iwl3945_scan_channel {
struct iwl3945_tx_power tpc;
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
-} __attribute__ ((packed));
+} __packed;
/* set number of direct probes u8 type */
#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
@@ -2641,7 +2640,7 @@ struct iwl_scan_channel {
u8 dsp_atten; /* gain for DSP */
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
-} __attribute__ ((packed));
+} __packed;
/* set number of direct probes __le32 type */
#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
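
As an aside, both probe-mask macros expand a direct-probe count n into a contiguous run of set bits from bit 1 up to bit n. A standalone check of that identity (hypothetical demo; the cpu_to_le32() wrapper of the __le32 variant is omitted here):

#include <stdio.h>

#define BIT(n)              (1u << (n))
#define SCAN_PROBE_MASK(n)  ((BIT(n) | (BIT(n) - BIT(1))))  /* bits 1..n set */

int main(void)
{
	printf("%#x %#x %#x\n",
	       SCAN_PROBE_MASK(1),   /* 0x2  */
	       SCAN_PROBE_MASK(3),   /* 0xe  */
	       SCAN_PROBE_MASK(5));  /* 0x3e */
	return 0;
}
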
@@ -2658,7 +2657,7 @@ struct iwl_ssid_ie {
u8 id;
u8 len;
u8 ssid[32];
-} __attribute__ ((packed));
+} __packed;
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
@@ -2764,7 +2763,7 @@ struct iwl3945_scan_cmd {
* before requesting another scan.
*/
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct iwl_scan_cmd {
__le16 len;
@@ -2808,7 +2807,7 @@ struct iwl_scan_cmd {
* before requesting another scan.
*/
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/* Can abort will notify by complete notification with abort status. */
#define CAN_ABORT_STATUS cpu_to_le32(0x1)
@@ -2820,7 +2819,7 @@ struct iwl_scan_cmd {
*/
struct iwl_scanreq_notification {
__le32 status; /* 1: okay, 2: cannot fulfill request */
-} __attribute__ ((packed));
+} __packed;
/*
* SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
@@ -2833,7 +2832,7 @@ struct iwl_scanstart_notification {
u8 band;
u8 reserved[2];
__le32 status;
-} __attribute__ ((packed));
+} __packed;
#define SCAN_OWNER_STATUS 0x1;
#define MEASURE_OWNER_STATUS 0x2;
@@ -2849,7 +2848,7 @@ struct iwl_scanresults_notification {
__le32 tsf_low;
__le32 tsf_high;
__le32 statistics[NUMBER_OF_STATISTICS];
-} __attribute__ ((packed));
+} __packed;
/*
* SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
@@ -2861,7 +2860,7 @@ struct iwl_scancomplete_notification {
u8 last_channel;
__le32 tsf_low;
__le32 tsf_high;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
@@ -2879,14 +2878,14 @@ struct iwl3945_beacon_notif {
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
-} __attribute__ ((packed));
+} __packed;
struct iwl4965_beacon_notif {
struct iwl4965_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
-} __attribute__ ((packed));
+} __packed;
/*
* REPLY_TX_BEACON = 0x91 (command, has simple generic response)
@@ -2898,7 +2897,7 @@ struct iwl3945_tx_beacon_cmd {
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
-} __attribute__ ((packed));
+} __packed;
struct iwl_tx_beacon_cmd {
struct iwl_tx_cmd tx;
@@ -2906,7 +2905,7 @@ struct iwl_tx_beacon_cmd {
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (10)
@@ -2932,7 +2931,7 @@ struct rate_histogram {
__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
} failed;
-} __attribute__ ((packed));
+} __packed;
/* statistics command response */
@@ -2952,7 +2951,7 @@ struct iwl39_statistics_rx_phy {
__le32 rxe_frame_limit_overrun;
__le32 sent_ack_cnt;
__le32 sent_cts_cnt;
-} __attribute__ ((packed));
+} __packed;
struct iwl39_statistics_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
@@ -2963,13 +2962,13 @@ struct iwl39_statistics_rx_non_phy {
* filtering process */
__le32 non_channel_beacons; /* beacons with our bss id but not on
* our serving channel */
-} __attribute__ ((packed));
+} __packed;
struct iwl39_statistics_rx {
struct iwl39_statistics_rx_phy ofdm;
struct iwl39_statistics_rx_phy cck;
struct iwl39_statistics_rx_non_phy general;
-} __attribute__ ((packed));
+} __packed;
struct iwl39_statistics_tx {
__le32 preamble_cnt;
@@ -2981,20 +2980,20 @@ struct iwl39_statistics_tx {
__le32 ack_timeout;
__le32 expected_ack_cnt;
__le32 actual_ack_cnt;
-} __attribute__ ((packed));
+} __packed;
struct statistics_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 reserved[4];
-} __attribute__ ((packed));
+} __packed;
struct iwl39_statistics_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
-} __attribute__ ((packed));
+} __packed;
struct iwl39_statistics_general {
__le32 temperature;
@@ -3004,7 +3003,7 @@ struct iwl39_statistics_general {
__le32 slots_idle;
__le32 ttl_timestamp;
struct iwl39_statistics_div div;
-} __attribute__ ((packed));
+} __packed;
struct statistics_rx_phy {
__le32 ina_cnt;
@@ -3027,7 +3026,7 @@ struct statistics_rx_phy {
__le32 mh_format_err;
__le32 re_acq_main_rssi_sum;
__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
struct statistics_rx_ht_phy {
__le32 plcp_err;
@@ -3040,7 +3039,7 @@ struct statistics_rx_ht_phy {
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
__le32 unsupport_mcs;
-} __attribute__ ((packed));
+} __packed;
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3075,14 +3074,14 @@ struct statistics_rx_non_phy {
__le32 beacon_energy_a;
__le32 beacon_energy_b;
__le32 beacon_energy_c;
-} __attribute__ ((packed));
+} __packed;
struct statistics_rx {
struct statistics_rx_phy ofdm;
struct statistics_rx_phy cck;
struct statistics_rx_non_phy general;
struct statistics_rx_ht_phy ofdm_ht;
-} __attribute__ ((packed));
+} __packed;
/**
* struct statistics_tx_power - current tx power
@@ -3096,7 +3095,7 @@ struct statistics_tx_power {
u8 ant_b;
u8 ant_c;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct statistics_tx_non_phy_agg {
__le32 ba_timeout;
@@ -3109,7 +3108,7 @@ struct statistics_tx_non_phy_agg {
__le32 underrun;
__le32 bt_prio_kill;
__le32 rx_ba_rsp_cnt;
-} __attribute__ ((packed));
+} __packed;
struct statistics_tx {
__le32 preamble_cnt;
@@ -3134,7 +3133,7 @@ struct statistics_tx {
*/
struct statistics_tx_power tx_power;
__le32 reserved1;
-} __attribute__ ((packed));
+} __packed;
struct statistics_div {
@@ -3144,7 +3143,7 @@ struct statistics_div {
__le32 probe_time;
__le32 reserved1;
__le32 reserved2;
-} __attribute__ ((packed));
+} __packed;
struct statistics_general {
__le32 temperature; /* radio temperature */
@@ -3164,7 +3163,7 @@ struct statistics_general {
__le32 num_of_sos_states;
__le32 reserved2;
__le32 reserved3;
-} __attribute__ ((packed));
+} __packed;
#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
@@ -3189,7 +3188,7 @@ struct statistics_general {
#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
struct iwl_statistics_cmd {
__le32 configuration_flags; /* IWL_STATS_CONF_* */
-} __attribute__ ((packed));
+} __packed;
/*
* STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
@@ -3214,14 +3213,14 @@ struct iwl3945_notif_statistics {
struct iwl39_statistics_rx rx;
struct iwl39_statistics_tx tx;
struct iwl39_statistics_general general;
-} __attribute__ ((packed));
+} __packed;
struct iwl_notif_statistics {
__le32 flag;
struct statistics_rx rx;
struct statistics_tx tx;
struct statistics_general general;
-} __attribute__ ((packed));
+} __packed;
/*
@@ -3253,7 +3252,7 @@ struct iwl_missed_beacon_notif {
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
@@ -3455,7 +3454,7 @@ struct iwl_missed_beacon_notif {
struct iwl_sensitivity_cmd {
__le16 control; /* always use "1" */
__le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
-} __attribute__ ((packed));
+} __packed;
/**
@@ -3536,31 +3535,31 @@ struct iwl_calib_cfg_elmnt_s {
__le32 send_res;
__le32 apply_res;
__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwl_calib_cfg_status_s {
struct iwl_calib_cfg_elmnt_s once;
struct iwl_calib_cfg_elmnt_s perd;
__le32 flags;
-} __attribute__ ((packed));
+} __packed;
struct iwl_calib_cfg_cmd {
struct iwl_calib_cfg_status_s ucd_calib_cfg;
struct iwl_calib_cfg_status_s drv_calib_cfg;
__le32 reserved1;
-} __attribute__ ((packed));
+} __packed;
struct iwl_calib_hdr {
u8 op_code;
u8 first_group;
u8 groups_num;
u8 data_valid;
-} __attribute__ ((packed));
+} __packed;
struct iwl_calib_cmd {
struct iwl_calib_hdr hdr;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
struct iwl_calib_diff_gain_cmd {
@@ -3569,14 +3568,14 @@ struct iwl_calib_diff_gain_cmd {
s8 diff_gain_b;
s8 diff_gain_c;
u8 reserved1;
-} __attribute__ ((packed));
+} __packed;
struct iwl_calib_xtal_freq_cmd {
struct iwl_calib_hdr hdr;
u8 cap_pin1;
u8 cap_pin2;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
struct iwl_calib_chain_noise_reset_cmd {
@@ -3590,7 +3589,7 @@ struct iwl_calib_chain_noise_gain_cmd {
u8 delta_gain_1;
u8 delta_gain_2;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
* (12)
@@ -3613,7 +3612,7 @@ struct iwl_led_cmd {
u8 on; /* # intervals on while blinking;
* "0", regardless of "off", turns LED off */
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* station priority table entries
@@ -3749,7 +3748,7 @@ struct iwl_wimax_coex_event_entry {
u8 win_medium_prio;
u8 reserved;
u8 flags;
-} __attribute__ ((packed));
+} __packed;
/* COEX flag masks */
@@ -3766,7 +3765,7 @@ struct iwl_wimax_coex_cmd {
u8 flags;
u8 reserved[3];
struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
-} __attribute__ ((packed));
+} __packed;
/*
* Coexistence MEDIUM NOTIFICATION
@@ -3795,7 +3794,7 @@ struct iwl_wimax_coex_cmd {
struct iwl_coex_medium_notification {
__le32 status;
__le32 events;
-} __attribute__ ((packed));
+} __packed;
/*
* Coexistence EVENT Command
@@ -3810,11 +3809,11 @@ struct iwl_coex_event_cmd {
u8 flags;
u8 event;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwl_coex_event_resp {
__le32 status;
-} __attribute__ ((packed));
+} __packed;
/******************************************************************************
@@ -3858,7 +3857,7 @@ struct iwl_rx_packet {
__le32 status;
u8 raw[0];
} u;
-} __attribute__ ((packed));
+} __packed;
int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 426e95567de3..62c50bc0089a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -141,13 +141,14 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
-u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
int i;
u8 ind = ant;
+
for (i = 0; i < RATE_ANT_NUM - 1; i++) {
ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
- if (priv->hw_params.valid_tx_ant & BIT(ind))
+ if (valid & BIT(ind))
return ind;
}
return ant;
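
The valid-antenna bitmap is now supplied by the caller instead of being read from hw_params. A minimal userspace sketch of the same round-robin selection, with simplified names and three antennas A/B/C on bits 0-2:

#include <stdint.h>
#include <stdio.h>

#define RATE_ANT_NUM 3   /* antennas A, B, C */

/* Advance from the current index and return the first antenna whose bit is
 * set in the validity mask, or the original index if none is valid. */
static uint8_t toggle_tx_ant(uint8_t ant, uint8_t valid)
{
	uint8_t ind = ant;
	int i;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & (1u << ind))
			return ind;
	}
	return ant;
}

int main(void)
{
	/* with only antennas A and C valid (bits 0 and 2), toggling from A picks C */
	printf("%u\n", toggle_tx_ant(0, 0x5));   /* prints 2 */
	return 0;
}
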
@@ -457,7 +458,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
if (!sta_ht_inf->ht_supported)
return 0;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
if (priv->disable_ht40)
return 0;
#endif
@@ -506,11 +507,11 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
}
beacon_int = iwl_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * 1024);
+ priv->hw_params.max_beacon_itrvl * TIME_UNIT);
priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
- interval_tm = beacon_int * 1024;
+ interval_tm = beacon_int * TIME_UNIT;
rem = do_div(tsf, interval_tm);
priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
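
With TIME_UNIT now spelled out (1024 usec per TU), beacon_init_val is simply the time left until the next beacon boundary. A quick standalone check of the arithmetic, using a made-up timestamp:

#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT 1024u   /* usec per time unit */

int main(void)
{
	uint64_t tsf = 1000000;                        /* usec, stand-in timestamp */
	uint32_t beacon_int = 100;                     /* TU */
	uint32_t interval_tm = beacon_int * TIME_UNIT; /* 102400 usec */
	uint32_t rem = (uint32_t)(tsf % interval_tm);  /* 78400 usec into the interval */

	/* time remaining until the next beacon: 24000 usec */
	printf("beacon_init_val = %u usec\n", interval_tm - rem);
	return 0;
}
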
@@ -932,9 +933,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
}
EXPORT_SYMBOL(iwl_set_rxon_channel);
-static void iwl_set_flags_for_band(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
if (band == IEEE80211_BAND_5GHZ) {
priv->staging_rxon.flags &=
@@ -943,19 +944,17 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
/* Copied from iwl_post_associate() */
- if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif && vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (vif && vif->type == NL80211_IFTYPE_ADHOC)
- priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
}
}
+EXPORT_SYMBOL(iwl_set_flags_for_band);
/*
* initialize rxon structure with default values from eeprom
@@ -1021,15 +1020,17 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
/* clear both MIX and PURE40 mode flag */
priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
RXON_FLG_CHANNEL_MODE_PURE_40);
- memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
- memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
+
+ if (vif)
+ memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
+
priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_connection_init_rx_config);
-static void iwl_set_rate(struct iwl_priv *priv)
+void iwl_set_rate(struct iwl_priv *priv)
{
const struct ieee80211_supported_band *hw = NULL;
struct ieee80211_rate *rate;
@@ -1057,6 +1058,21 @@ static void iwl_set_rate(struct iwl_priv *priv)
priv->staging_rxon.ofdm_basic_rates =
(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
+EXPORT_SYMBOL(iwl_set_rate);
+
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ ieee80211_chswitch_done(priv->vif, is_success);
+ mutex_lock(&priv->mutex);
+ priv->switch_rxon.switch_in_progress = false;
+ mutex_unlock(&priv->mutex);
+ }
+}
+EXPORT_SYMBOL(iwl_chswitch_done);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
@@ -1071,11 +1087,12 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
priv->staging_rxon.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
- } else
+ iwl_chswitch_done(priv, true);
+ } else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
-
- priv->switch_rxon.switch_in_progress = false;
+ iwl_chswitch_done(priv, false);
+ }
}
}
EXPORT_SYMBOL(iwl_rx_csa);
@@ -1502,130 +1519,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
}
EXPORT_SYMBOL(iwl_send_statistics_request);
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int ret = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL49_RTC_INST_LOWER_BOUND);
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- ret = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
- u32 len)
-{
- u32 val;
- u32 save_len = len;
- int ret = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL49_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- ret = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return ret;
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-int iwl_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int ret;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwlcore_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- ret = iwlcore_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- ret = iwlcore_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl_verify_inst_full(priv, image, len);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_verify_ucode);
-
-
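
The kerneldoc on the removed helpers spells out the strategy: spot-check the on-card instruction image every 100 bytes against the host copy, and only fall back to a full word-by-word compare when nothing matches. A self-contained sketch of that sparse pass, with a stand-in reader in place of the auto-incrementing HBUS target-memory port:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend "device" instruction memory; the driver reads it through the
 * auto-incrementing HBUS_TARG_MEM port instead. */
static uint32_t device_mem[1024];

static uint32_t read_device_word(size_t byte_offset)
{
	return device_mem[byte_offset / sizeof(uint32_t)];
}

/* Compare the host image against the device every 100 bytes and give up
 * after three mismatches, mirroring the sparse check removed above. */
static int verify_inst_sparse(const uint32_t *image, size_t len)
{
	unsigned int errcnt = 0;
	size_t i;

	for (i = 0; i < len; i += 100, image += 100 / sizeof(uint32_t))
		if (read_device_word(i) != *image && ++errcnt >= 3)
			break;

	return errcnt ? -1 : 0;
}

int main(void)
{
	static uint32_t host[1024];
	size_t i;

	for (i = 0; i < 1024; i++)
		host[i] = device_mem[i] = (uint32_t)(i * 2654435761u);

	printf("sparse verify: %s\n",
	       verify_inst_sparse(host, sizeof(host)) ? "mismatch" : "ok");
	return 0;
}
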
void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
@@ -2046,8 +1939,6 @@ static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
-
return iwlcore_commit_rxon(priv);
}
@@ -2056,7 +1947,8 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct iwl_priv *priv = hw->priv;
int err = 0;
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ vif->type, vif->addr);
mutex_lock(&priv->mutex);
@@ -2074,9 +1966,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
priv->vif = vif;
priv->iw_mode = vif->type;
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
- memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
-
err = iwl_set_mode(priv, vif);
if (err)
goto out_err;
@@ -2110,6 +1999,11 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
}
if (priv->vif == vif) {
priv->vif = NULL;
+ if (priv->scan_vif == vif) {
+ ieee80211_scan_completed(priv->hw, true);
+ priv->scan_vif = NULL;
+ priv->scan_request = NULL;
+ }
memset(priv->bssid, 0, ETH_ALEN);
}
mutex_unlock(&priv->mutex);
@@ -2210,22 +2104,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
spin_unlock_irqrestore(&priv->lock, flags);
- if (iwl_is_associated(priv) &&
- (le16_to_cpu(priv->active_rxon.channel) != ch) &&
- priv->cfg->ops->lib->set_channel_switch) {
- iwl_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- ret = priv->cfg->ops->lib->set_channel_switch(priv,
- ch);
- if (!ret) {
- iwl_print_rx_config_cmd(priv);
- goto out;
- }
- priv->switch_rxon.switch_in_progress = false;
- }
+
set_ch_out:
/* The list of supported rates and rate mask can be different
* for each band; since the band may have changed, reset
@@ -2583,7 +2462,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_update_stats);
#endif
-const static char *get_csr_string(int cmd)
+static const char *get_csr_string(int cmd)
{
switch (cmd) {
IWL_CMD(CSR_HW_IF_CONFIG_REG);
@@ -2654,7 +2533,7 @@ void iwl_dump_csr(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_dump_csr);
-const static char *get_fh_string(int cmd)
+static const char *get_fh_string(int cmd)
{
switch (cmd) {
IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
@@ -2876,6 +2755,61 @@ void iwl_bg_monitor_recover(unsigned long data)
}
EXPORT_SYMBOL(iwl_bg_monitor_recover);
+
+/*
+ * extended beacon time format
+ * time in usec will be changed into a 32-bit value in extended:internal format
+ * the extended part is the beacon counts
+ * the internal part is the time in usec within one beacon interval
+ */
+u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot = (usec / interval) &
+ (iwl_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits) >>
+ priv->hw_params.beacon_time_tsf_bits);
+ rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+
+ return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_usecs_to_beacons);
+
+/* base is usually what we get from ucode with each received frame,
+ * the same as HW timer counter counting down
+ */
+__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval)
+{
+ u32 base_low = base & iwl_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & iwl_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits)) +
+ (addon & iwl_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_add_beacon_time);
+
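
These two helpers implement the extended:internal encoding described in the comment above: the low beacon_time_tsf_bits hold the usec position inside the current beacon interval, and the remaining high bits count whole beacons (base being a down-counting HW timestamp). A minimal userspace sketch of the conversion, assuming the _agn layout of 22 low bits and TIME_UNIT of 1024 usec, with the driver's mask helpers reduced to plain shifts:

#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT  1024u                     /* usec per beacon-interval time unit */
#define TSF_BITS   22u                       /* _agn: bits 21:0 = time inside interval */
#define LOW_MASK   ((1u << TSF_BITS) - 1u)

static uint32_t usecs_to_ext_beacon_time(uint32_t usec, uint32_t beacon_interval_tu)
{
	uint32_t interval = beacon_interval_tu * TIME_UNIT;
	uint32_t quot, rem;

	if (!interval || !usec)
		return 0;

	quot = usec / interval;              /* extended part: whole beacons elapsed */
	rem = (usec % interval) & LOW_MASK;  /* internal part: usec into this interval */

	return (quot << TSF_BITS) | rem;
}

int main(void)
{
	/* 250000 usec with a 100 TU (102400 usec) beacon interval */
	uint32_t v = usecs_to_ext_beacon_time(250000, 100);

	printf("beacons elapsed: %u, usec into interval: %u\n",
	       v >> TSF_BITS, v & LOW_MASK);  /* 2, 45200 */
	return 0;
}
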
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 31775bd9c361..76288c56a7d7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -79,6 +79,8 @@ struct iwl_cmd;
.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
.driver_data = (kernel_ulong_t)&(cfg)
+#define TIME_UNIT 1024
+
#define IWL_SKU_G 0x1
#define IWL_SKU_A 0x2
#define IWL_SKU_N 0x8
@@ -173,7 +175,8 @@ struct iwl_lib_ops {
void (*dump_nic_error_log)(struct iwl_priv *priv);
void (*dump_csr)(struct iwl_priv *priv);
int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
- int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
+ int (*set_channel_switch)(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -325,7 +328,8 @@ struct iwl_cfg {
const bool ucode_tracing;
const bool sensitivity_calib_by_driver;
const bool chain_noise_calib_by_driver;
- u8 scan_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
};
/***************************
@@ -343,6 +347,9 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv);
int iwl_full_rxon_required(struct iwl_priv *priv);
void iwl_set_rxon_chain(struct iwl_priv *priv);
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
enum ieee80211_band band);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
@@ -350,6 +357,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *sta_ht_inf);
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct ieee80211_vif *vif);
+void iwl_set_rate(struct iwl_priv *priv);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
@@ -461,6 +469,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* TX helpers */
@@ -474,8 +483,6 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
@@ -495,7 +502,7 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
-u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx);
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
{
@@ -528,7 +535,7 @@ void iwl_bg_start_internal_scan(struct work_struct *work);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ie, int ie_len, int left);
+ const u8 *ta, const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -595,6 +602,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
}
void iwl_bg_monitor_recover(unsigned long data);
+u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
+__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval);
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -693,7 +703,6 @@ extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
-extern int iwl_verify_ucode(struct iwl_priv *priv);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
void iwl_apm_stop(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9659c5d01df9..cee3d12eb383 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,27 +106,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.open = iwl_dbgfs_open_file_generic, \
};
-int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->statistics.flag));
- if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
-
static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -330,45 +309,35 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
for (i = 0; i < max_sta; i++) {
station = &priv->stations[i];
- if (station->used) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "station %d:\ngeneral data:\n", i+1);
- pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n",
- station->sta.sta.sta_id);
- pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n",
- station->sta.mode);
- pos += scnprintf(buf + pos, bufsz - pos,
- "flags: 0x%x\n",
- station->sta.station_flags_msk);
- pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "seq_num\t\ttxq_id");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tframe_count\twait_for_ba\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap0\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "bitmap1\trate_n_flags");
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ if (!station->used)
+ continue;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n",
+ i, station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
- for (j = 0; j < MAX_TID_COUNT; j++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "[%d]:\t\t%u", j,
- station->tid[j].seq_number);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%u\t\t%u\t\t%u\t\t",
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].agg.wait_for_ba);
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
pos += scnprintf(buf + pos, bufsz - pos,
- "%u\t%llu\t%u",
- station->tid[j].agg.start_idx,
- (unsigned long long)station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
+ " - waitforba");
pos += scnprintf(buf + pos, bufsz - pos, "\n");
}
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1049,8 +1018,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
rxq->write);
pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
rxq->free_count);
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index f3f3473c5c7e..338b5177029d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -48,25 +48,6 @@
#include "iwl-power.h"
#include "iwl-agn-rs.h"
-/* configuration for the iwl4965 */
-extern struct iwl_cfg iwl4965_agn_cfg;
-extern struct iwl_cfg iwl5300_agn_cfg;
-extern struct iwl_cfg iwl5100_agn_cfg;
-extern struct iwl_cfg iwl5350_agn_cfg;
-extern struct iwl_cfg iwl5100_bgn_cfg;
-extern struct iwl_cfg iwl5100_abg_cfg;
-extern struct iwl_cfg iwl5150_agn_cfg;
-extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6000g2a_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2agn_cfg;
-extern struct iwl_cfg iwl6000i_2abg_cfg;
-extern struct iwl_cfg iwl6000i_2bg_cfg;
-extern struct iwl_cfg iwl6000_3agn_cfg;
-extern struct iwl_cfg iwl6050_2agn_cfg;
-extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl1000_bgn_cfg;
-extern struct iwl_cfg iwl1000_bg_cfg;
-
struct iwl_tx_queue;
/* CT-KILL constants */
@@ -133,8 +114,8 @@ struct iwl_cmd_meta {
* structure is stored at the end of the shared queue memory. */
u32 flags;
- DECLARE_PCI_UNMAP_ADDR(mapping)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
};
/*
@@ -157,11 +138,11 @@ struct iwl_queue {
* space more than this */
int high_mark; /* high watermark, stop queue if free
* space less than this */
-} __attribute__ ((packed));
+} __packed;
/* One for each TFD */
struct iwl_tx_info {
- struct sk_buff *skb[IWL_NUM_OF_TBS - 1];
+ struct sk_buff *skb;
};
/**
@@ -343,8 +324,8 @@ struct iwl_device_cmd {
struct iwl_tx_cmd tx;
struct iwl6000_channel_switch_cmd chswitch;
u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __attribute__ ((packed)) cmd;
-} __attribute__ ((packed));
+ } __packed cmd;
+} __packed;
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -367,7 +348,7 @@ struct iwl_host_cmd {
/**
* struct iwl_rx_queue - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -381,7 +362,7 @@ struct iwl_host_cmd {
*/
struct iwl_rx_queue {
__le32 *bd;
- dma_addr_t dma_addr;
+ dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
@@ -433,7 +414,7 @@ struct iwl_ht_agg {
struct iwl_tid_data {
- u16 seq_number;
+ u16 seq_number; /* agn only */
u16 tfds_in_queue;
struct iwl_ht_agg agg;
};
@@ -583,6 +564,12 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+ IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
+ IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
+ IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
+ IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11,
+ IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12,
+ IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
};
struct iwl_ucode_tlv {
@@ -590,7 +577,7 @@ struct iwl_ucode_tlv {
__le16 alternative; /* see comment */
__le32 length; /* not including type/length fields */
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
@@ -675,6 +662,7 @@ struct iwl_sensitivity_ranges {
* @sw_crypto: 0 for hw, 1 for sw
* @max_xxx_size: for ucode uses
* @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
* @calib_init_cfg: setup initial calibrations for the hw
* @struct iwl_sensitivity_ranges: range of sensitivity values
*/
@@ -701,6 +689,7 @@ struct iwl_hw_params {
u32 ct_kill_threshold; /* value in hw-dependent units */
u32 ct_kill_exit_threshold; /* value in hw-dependent units */
/* for 1000, 6000 series and up */
+ u16 beacon_time_tsf_bits;
u32 calib_init_cfg;
const struct iwl_sensitivity_ranges *sens;
};
@@ -1075,6 +1064,20 @@ struct iwl_force_reset {
unsigned long last_force_reset_jiffies;
};
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _agn devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IWLAGN_EXT_BEACON_TIME_POS 22
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1109,7 +1112,7 @@ struct iwl_priv {
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
- /* we allocate array of iwl4965_channel_info for NIC's valid channels.
+ /* we allocate array of iwl_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
@@ -1127,6 +1130,7 @@ struct iwl_priv {
void *scan_cmd;
enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1174,7 +1178,7 @@ struct iwl_priv {
struct iwl_switch_rxon switch_rxon;
/* 1st responses from initialize and runtime uCode images.
- * 4965's initialize alive response contains some calibration data. */
+ * _agn's initialize alive response contains some calibration data. */
struct iwl_init_alive_resp card_alive_init;
struct iwl_alive_resp card_alive;
@@ -1221,7 +1225,7 @@ struct iwl_priv {
struct iwl_tt_mgmt thermal_throttle;
struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_notif_statistics accum_statistics;
struct iwl_notif_statistics delta_statistics;
struct iwl_notif_statistics max_delta;
@@ -1229,9 +1233,10 @@ struct iwl_priv {
/* context information */
u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
- u8 mac_addr[ETH_ALEN];
- /*station table variables */
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
@@ -1273,7 +1278,7 @@ struct iwl_priv {
struct delayed_work rfkill_poll;
struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl3945_notif_statistics accum_statistics;
struct iwl3945_notif_statistics delta_statistics;
struct iwl3945_notif_statistics max_delta;
@@ -1315,6 +1320,9 @@ struct iwl_priv {
bool last_phy_res_valid;
struct completion firmware_loading_complete;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
} _agn;
#endif
};
@@ -1353,9 +1361,7 @@ struct iwl_priv {
/* debugging info */
u32 debug_level; /* per device debugging will override global
iwl_debug_level if set */
- u32 framecnt_to_us;
- atomic_t restrict_refcnt;
- bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* debugfs */
u16 tx_traffic_idx;
@@ -1364,8 +1370,8 @@ struct iwl_priv {
u8 *rx_traffic;
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#endif /* CONFIG_IWLWIFI_DEBUG */
struct work_struct txpower_work;
u32 disable_sens_cal;
@@ -1419,9 +1425,9 @@ static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
int txq_id, int idx)
{
- if (priv->txq[txq_id].txb[idx].skb[0])
+ if (priv->txq[txq_id].txb[idx].skb)
return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb[0]->data;
+ txb[idx].skb->data;
return NULL;
}
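Note: the new beacon_time_tsf_bits field in struct iwl_hw_params is meant to be paired with the IWL3945_EXT_BEACON_TIME_POS / IWLAGN_EXT_BEACON_TIME_POS values added above. The per-family hw-params init is not part of this hunk, so the following is only a sketch of how a setup routine would be expected to pick the split:

	/* sketch only: per-family hw-params init chooses the bit split (assumed) */
	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;	/* 22 on agn */
	/* ...while a 3945 setup would use IWL3945_EXT_BEACON_TIME_POS (24) */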
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 95aa202c85e3..5488006491a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -118,7 +118,7 @@ enum {
struct iwl_eeprom_channel {
u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __attribute__ ((packed));
+} __packed;
/**
* iwl_eeprom_enhanced_txpwr structure
@@ -144,7 +144,7 @@ struct iwl_eeprom_enhanced_txpwr {
s8 reserved;
s8 mimo2_max;
s8 mimo3_max;
-} __attribute__ ((packed));
+} __packed;
/* 3945 Specific */
#define EEPROM_3945_EEPROM_VERSION (0x2f)
@@ -312,7 +312,7 @@ struct iwl_eeprom_calib_measure {
u8 gain_idx; /* Index into gain table */
u8 actual_pow; /* Measured RF output power, half-dBm */
s8 pa_det; /* Power amp detector level (not used) */
-} __attribute__ ((packed));
+} __packed;
/*
@@ -328,7 +328,7 @@ struct iwl_eeprom_calib_ch_info {
struct iwl_eeprom_calib_measure
measurements[EEPROM_TX_POWER_TX_CHAINS]
[EEPROM_TX_POWER_MEASUREMENTS];
-} __attribute__ ((packed));
+} __packed;
/*
* txpower subband info.
@@ -345,7 +345,7 @@ struct iwl_eeprom_calib_subband_info {
u8 ch_to; /* channel number of highest channel in subband */
struct iwl_eeprom_calib_ch_info ch1;
struct iwl_eeprom_calib_ch_info ch2;
-} __attribute__ ((packed));
+} __packed;
/*
@@ -374,7 +374,7 @@ struct iwl_eeprom_calib_info {
__le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
-} __attribute__ ((packed));
+} __packed;
#define ADDRESS_MSK 0x0000FFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 113c3669b9ce..a3fcbb5f2c70 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -449,7 +449,7 @@ struct iwl_rb_status {
__le16 finished_rb_num;
__le16 finished_fr_nam;
__le32 __unused; /* 3945 only */
-} __attribute__ ((packed));
+} __packed;
#define TFD_QUEUE_SIZE_MAX (256)
@@ -475,7 +475,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
struct iwl_tfd_tb {
__le32 lo;
__le16 hi_n_len;
-} __attribute__((packed));
+} __packed;
/**
* struct iwl_tfd
@@ -510,7 +510,7 @@ struct iwl_tfd {
u8 num_tbs;
struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
__le32 __pad;
-} __attribute__ ((packed));
+} __packed;
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
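Note: the __attribute__ ((packed)) to __packed conversions throughout this patch rely on the kernel-wide shorthand from the compiler headers; the macro expands to the very same attribute:

	/* from the kernel compiler headers (e.g. include/linux/compiler-gcc.h) */
	#define __packed	__attribute__((packed))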
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 3ff6b9d25a10..621abe3c5afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -92,6 +92,11 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
&desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
@@ -170,4 +175,26 @@ static inline void iwl_enable_interrupts(struct iwl_priv *priv)
iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
+/**
+ * iwl_beacon_time_mask_low - mask for the low bits of the beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of low bits used for the time within a beacon interval
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask for the high bits of the beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of low bits excluded from the mask
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
#endif /* __iwl_helpers_h__ */
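Note: the two helpers above split a 32-bit extended beacon time into the beacon count (high bits) and the time in usec within one beacon interval (low bits), with the split taken from hw_params rather than hard-coded. A minimal sketch, with bcn_time and the local names invented for illustration:

	u16 tsf_bits = priv->hw_params.beacon_time_tsf_bits;	/* 22 on agn, 24 on 3945 */
	u32 usec_in_interval = bcn_time & iwl_beacon_time_mask_low(priv, tsf_bits);
	u32 beacon_count = (bcn_time & iwl_beacon_time_mask_high(priv, tsf_bits)) >> tsf_bits;
	/*
	 * tsf_bits == 24 yields masks 0x00FFFFFF / 0xFF000000, i.e. the old 3945
	 * BEACON_TIME_MASK_LOW/HIGH constants removed later in this patch;
	 * tsf_bits == 22 yields 0x003FFFFF / 0xFFC00000 for agn devices.
	 */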
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0a5d7cf25196..86a353765796 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -175,7 +175,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
@@ -199,7 +199,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
err_rb:
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
err_bd:
return -ENOMEM;
}
@@ -286,7 +286,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
last_rx_noise);
}
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
* based on the assumption that all statistics counters are in DWORDs
* FIXME: This function is for debugging, do not deal with
@@ -448,7 +448,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
(pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
iwl_recover_from_statistics(priv, pkt);
@@ -480,7 +480,7 @@ void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_DEBUGFS
memset(&priv->accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
memset(&priv->delta_statistics, 0,
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 386c5f96eff8..798f93e0ff50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -333,7 +333,8 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- if (test_bit(STATUS_SCANNING, &priv->status)) {
+ if (test_bit(STATUS_SCANNING, &priv->status) &&
+ !priv->is_internal_short_scan) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
@@ -348,8 +349,16 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
/* mac80211 will only ask for one band at a time */
priv->scan_band = req->channels[0]->band;
priv->scan_request = req;
+ priv->scan_vif = vif;
- ret = iwl_scan_initiate(priv, vif);
+ /*
+ * If an internal scan is in progress, just set
+ * up the scan_request as per above.
+ */
+ if (priv->is_internal_short_scan)
+ ret = 0;
+ else
+ ret = iwl_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -438,7 +447,7 @@ EXPORT_SYMBOL(iwl_bg_scan_check);
*/
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ies, int ie_len, int left)
+ const u8 *ta, const u8 *ies, int ie_len, int left)
{
int len = 0;
u8 *pos = NULL;
@@ -451,7 +460,7 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
frame->seq_ctrl = 0;
@@ -514,7 +523,21 @@ void iwl_bg_scan_completed(struct work_struct *work)
priv->is_internal_short_scan = false;
IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
internal = true;
+ } else {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
}
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ goto out;
+
+ if (internal && priv->scan_request)
+ iwl_scan_initiate(priv, priv->scan_vif);
+
+ /* Since setting the TXPOWER may have been deferred while
+ * performing the scan, fire one off */
+ iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ out:
mutex_unlock(&priv->mutex);
/*
@@ -524,15 +547,6 @@ void iwl_bg_scan_completed(struct work_struct *work)
*/
if (!internal)
ieee80211_scan_completed(priv->hw, false);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* Since setting the TXPOWER may have been deferred while
- * performing the scan, fire one off */
- mutex_lock(&priv->mutex);
- iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
- mutex_unlock(&priv->mutex);
}
EXPORT_SYMBOL(iwl_bg_scan_completed);
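Note: the net effect of the scan changes above is that a mac80211 scan request arriving while an internal short scan is running is only recorded, and the deferred request is then started from the completion handler. A condensed sketch of that flow (not literal code from the patch):

	/* iwl_mac_hw_scan(): remember the request; defer if an internal scan runs */
	priv->scan_request = req;
	priv->scan_vif = vif;
	if (priv->is_internal_short_scan)
		return 0;				/* started later */
	return iwl_scan_initiate(priv, vif);

	/* iwl_bg_scan_completed(): kick off the deferred request, if any */
	if (internal && priv->scan_request)
		iwl_scan_initiate(priv, priv->scan_vif);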
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index af6babee2891..c4ca0b5d77da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -42,7 +42,7 @@ struct ieee80211_basic_report {
__le64 start_time;
__le16 duration;
u8 map;
-} __attribute__ ((packed));
+} __packed;
enum { /* ieee80211_measurement_request.mode */
/* Bit 0 is reserved */
@@ -63,13 +63,13 @@ struct ieee80211_measurement_params {
u8 channel;
__le64 start_time;
__le16 duration;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_info_element {
u8 id;
u8 len;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_measurement_request {
struct ieee80211_info_element ie;
@@ -77,7 +77,7 @@ struct ieee80211_measurement_request {
u8 mode;
u8 type;
struct ieee80211_measurement_params params[0];
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_measurement_report {
struct ieee80211_info_element ie;
@@ -87,6 +87,6 @@ struct ieee80211_measurement_report {
union {
struct ieee80211_basic_report basic[0];
} u;
-} __attribute__ ((packed));
+} __packed;
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c27c13fbb1ae..d57df6c02db3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -311,10 +311,10 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
struct ieee80211_sta_ht_cap *ht_info,
u8 *sta_id_r)
{
- struct iwl_station_entry *station;
unsigned long flags_spin;
int ret = 0;
u8 sta_id;
+ struct iwl_addsta_cmd sta_cmd;
*sta_id_r = 0;
spin_lock_irqsave(&priv->sta_lock, flags_spin);
@@ -347,14 +347,15 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
}
priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- station = &priv->stations[sta_id];
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
/* Add station to device's station table */
- ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -488,7 +489,7 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
}
static int iwl_send_remove_station(struct iwl_priv *priv,
- struct iwl_station_entry *station)
+ const u8 *addr, int sta_id)
{
struct iwl_rx_packet *pkt;
int ret;
@@ -505,7 +506,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
cmd.flags |= CMD_WANT_SKB;
@@ -525,7 +526,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
+ iwl_sta_ucode_deactivate(priv, sta_id);
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
@@ -546,7 +547,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
- struct iwl_station_entry *station;
unsigned long flags;
if (!iwl_is_ready(priv)) {
@@ -592,10 +592,9 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
BUG_ON(priv->num_stations < 0);
- station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return iwl_send_remove_station(priv, station);
+ return iwl_send_remove_station(priv, addr, sta_id);
out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
return -EINVAL;
@@ -643,11 +642,13 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations);
*/
void iwl_restore_stations(struct iwl_priv *priv)
{
- struct iwl_station_entry *station;
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
unsigned long flags_spin;
int i;
bool found = false;
int ret;
+ bool send_lq;
if (!iwl_is_ready(priv)) {
IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
@@ -669,13 +670,20 @@ void iwl_restore_stations(struct iwl_priv *priv)
for (i = 0; i < priv->hw_params.max_stations; i++) {
if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- station = &priv->stations[i];
- ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
if (ret) {
- IWL_ERR(priv, "Adding station %pM failed.\n",
- station->sta.sta.addr);
spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -684,8 +692,8 @@ void iwl_restore_stations(struct iwl_priv *priv)
* Rate scaling has already been initialized, send
* current LQ command
*/
- if (station->lq)
- iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
+ if (send_lq)
+ iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true);
spin_lock_irqsave(&priv->sta_lock, flags_spin);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
@@ -972,24 +980,16 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
unsigned long flags;
int i;
- if (sta) {
- sta_id = iwl_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
- sta->addr);
- return;
- }
- } else
- sta_id = priv->hw_params.bcast_sta_id;
-
-
if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
briefly on SW decryption */
return;
}
+ sta_id = iwl_sta_id_or_broadcast(priv, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return;
+
spin_lock_irqsave(&priv->sta_lock, flags);
priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
@@ -1277,9 +1277,8 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
@@ -1310,7 +1309,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid)
{
unsigned long flags;
- int sta_id;
+ int sta_id, ret;
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -1323,10 +1322,11 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
- CMD_ASYNC);
+ return ret;
+
}
EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
@@ -1340,9 +1340,9 @@ void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
priv->stations[sta_id].sta.sta.modify_mask = 0;
priv->stations[sta_id].sta.sleep_tx_count = 0;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
@@ -1357,9 +1357,9 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
STA_MODIFY_SLEEP_TX_COUNT_MSK;
priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
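Note: the synchronous command paths above (iwl_add_station_common, iwl_send_remove_station, iwl_restore_stations, and the 3945 key handling later in the patch) share one pattern: snapshot the station command into a local buffer while holding sta_lock, drop the lock, and only then issue the potentially sleeping host command. A minimal sketch of that pattern:

	struct iwl_addsta_cmd sta_cmd;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);	/* may sleep, lock dropped */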
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index c2a453a1a991..5b1b1e461eb6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -107,4 +107,33 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
}
+
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return priv->hw_params.bcast_sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
#endif /* __iwl_sta_h__ */
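Note: a typical caller of the new helper, mirroring the iwl3945_tx_skb() and set_key conversions later in this patch:

	/* TX path: fall back to the broadcast station when mac80211 passes no sta */
	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
	if (sta_id == IWL_INVALID_STATION)
		goto drop;	/* only hit for a half-initialised station */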
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 1ece2ea09773..a81989c06983 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -77,21 +77,6 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
-
-void iwl_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-EXPORT_SYMBOL(iwl_free_tfds_in_queue);
-
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -169,15 +154,15 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
}
pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(&txq->meta[i], mapping),
- pci_unmap_len(&txq->meta[i], len),
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL);
}
if (huge) {
i = q->n_window;
pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(&txq->meta[i], mapping),
- pci_unmap_len(&txq->meta[i], len),
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL);
}
@@ -287,7 +272,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
if (id != IWL_CMD_QUEUE_NUM) {
- txq->txb = kmalloc(sizeof(txq->txb[0]) *
+ txq->txb = kzalloc(sizeof(txq->txb[0]) *
TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
if (!txq->txb) {
IWL_ERR(priv, "kmalloc for auxiliary BD "
@@ -531,8 +516,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(out_meta, mapping, phys_addr);
- pci_unmap_len_set(out_meta, len, fix_size);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
@@ -626,8 +611,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
meta = &txq->meta[cmd_index];
pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(meta, mapping),
- pci_unmap_len(meta, len),
+ dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len),
PCI_DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index a27872de4106..697fa6caaceb 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -197,6 +197,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
unsigned long flags;
+ struct iwl_addsta_cmd sta_cmd;
spin_lock_irqsave(&priv->sta_lock, flags);
memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -205,11 +206,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0);
- return 0;
+ return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -474,10 +475,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 unicast;
u8 sta_id;
u8 tid = 0;
- u16 seq_number = 0;
__le16 fc;
u8 wait_write_ptr = 0;
- u8 *qc = NULL;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -510,10 +509,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
- if (!info->control.sta)
- sta_id = priv->hw_params.bcast_sta_id;
- else
- sta_id = iwl_sta_id(info->control.sta);
+ sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -523,16 +519,10 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
if (unlikely(tid >= MAX_TID_COUNT))
goto drop;
- seq_number = priv->stations[sta_id].tid[tid].seq_number &
- IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = cpu_to_le16(seq_number) |
- (hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG));
- seq_number += 0x10;
}
/* Descriptor for chosen Tx queue */
@@ -548,7 +538,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb[0] = skb;
+ txq->txb[q->write_ptr].skb = skb;
/* Init first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[idx];
@@ -591,8 +581,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
@@ -631,8 +619,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
len, PCI_DMA_TODEVICE);
/* we do not map meta data ... so we can safely access address to
* provide to unmap command*/
- pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
- pci_unmap_len_set(out_meta, len, len);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
@@ -677,55 +665,6 @@ drop:
return -1;
}
-#define BEACON_TIME_MASK_LOW 0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH 0xFF000000
-#define TIME_UNIT 1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * 1024;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
- rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
- return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & BEACON_TIME_MASK_LOW;
- u32 addon_low = addon & BEACON_TIME_MASK_LOW;
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & BEACON_TIME_MASK_HIGH) +
- (addon & BEACON_TIME_MASK_HIGH);
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << 24);
- } else
- res += (1 << 24);
-
- return cpu_to_le32(res);
-}
-
static int iwl3945_get_measurement(struct iwl_priv *priv,
struct ieee80211_measurement_params *params,
u8 type)
@@ -743,8 +682,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
int duration = le16_to_cpu(params->duration);
if (iwl_is_associated(priv))
- add_time =
- iwl3945_usecs_to_beacons(
+ add_time = iwl_usecs_to_beacons(priv,
le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(priv->rxon_timing.beacon_interval));
@@ -759,8 +697,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
spectrum.start_time =
- iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
- add_time,
+ iwl_add_beacon_time(priv,
+ priv->_3945.last_beacon_time, add_time,
le16_to_cpu(priv->rxon_timing.beacon_interval));
else
spectrum.start_time = 0;
@@ -1233,7 +1171,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
}
dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ rxq->bd_dma);
dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
@@ -1314,6 +1252,8 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
while (i != r) {
+ int len;
+
rxb = rxq->queue[i];
/* If an RXB doesn't have a Rx queue slot associated with it,
@@ -1328,8 +1268,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
PCI_DMA_FROMDEVICE);
pkt = rxb_addr(rxb);
- trace_iwlwifi_dev_rx(priv, pkt,
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -3022,14 +2963,16 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->tx_cmd.len = cpu_to_le16(
iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
+ vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
} else {
+ /* use bcast addr, will not be transmitted but must be valid */
scan->tx_cmd.len = cpu_to_le16(
iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
- NULL, 0,
+ iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
}
/* select Rx antennas */
@@ -3158,19 +3101,16 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
iwlcore_commit_rxon(priv);
@@ -3334,8 +3274,7 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->staging_rxon.assoc_id = 0;
- if (vif->bss_conf.assoc_capability &
- WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -3343,17 +3282,12 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
-
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->staging_rxon.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
@@ -3386,17 +3320,9 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static_key = !iwl_is_associated(priv);
if (!static_key) {
- if (!sta) {
- sta_id = priv->hw_params.bcast_sta_id;
- } else {
- sta_id = iwl_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv,
- "leave - %pM not in station map.\n",
- sta->addr);
- return -EINVAL;
- }
- }
+ sta_id = iwl_sta_id_or_broadcast(priv, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
}
mutex_lock(&priv->mutex);
@@ -4028,9 +3954,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
priv->pci_dev = pdev;
priv->inta_mask = CSR_INI_SET_MASK;
-#ifdef CONFIG_IWLWIFI_DEBUG
- atomic_set(&priv->restrict_refcnt, 0);
-#endif
if (iwl_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
@@ -4099,9 +4022,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
}
/* MAC Address location in EEPROM same for 3945/4965 */
eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- memcpy(priv->mac_addr, eeprom->mac_address, ETH_ALEN);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr);
- SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
+ IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
+ SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
/***********************
* 5. Setup HW Constants
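Note: the iwl3945_usecs_to_beacons()/iwl3945_add_beacon_time() helpers removed from this file are replaced by generic core helpers that take priv, so the bit split comes from hw_params.beacon_time_tsf_bits instead of the hard-coded 8:24 format. A small worked example based on the removed code, with the numbers chosen for illustration:

	/*
	 * beacon_interval = 100 TU -> interval = 100 * 1024 = 102400 usec
	 * usec = 250000            -> quot = 2, rem = 45200
	 * packed 3945 value (8:24) =  (2 << 24) + 45200 = 0x0200b090
	 */
	add_time = iwl_usecs_to_beacons(priv, 250000, 100);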
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 7e16bcf59978..6421689f5e8e 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -56,7 +56,7 @@
struct iwm_umac_cmd_reset {
__le32 flags;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_PARAM_TBL_ORD_FIX 0x0
#define UMAC_PARAM_TBL_ORD_VAR 0x1
@@ -220,37 +220,37 @@ struct iwm_umac_cmd_set_param_fix {
__le16 tbl;
__le16 key;
__le32 value;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_set_param_var {
__le16 tbl;
__le16 key;
__le16 len;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_get_param {
__le16 tbl;
__le16 key;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_get_param_resp {
__le16 tbl;
__le16 key;
__le16 len;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_eeprom_proxy_hdr {
__le32 type;
__le32 offset;
__le32 len;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_eeprom_proxy {
struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
u8 buf[0];
-} __attribute__ ((packed));
+} __packed;
#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
@@ -267,13 +267,13 @@ struct iwm_umac_channel_info {
u8 reserved;
u8 flags;
__le32 channels_mask;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_get_channel_list {
__le16 count;
__le16 reserved;
struct iwm_umac_channel_info ch[0];
-} __attribute__ ((packed));
+} __packed;
/* UMAC WiFi interface commands */
@@ -304,7 +304,7 @@ struct iwm_umac_ssid {
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_scan_request {
struct iwm_umac_wifi_if hdr;
@@ -314,7 +314,7 @@ struct iwm_umac_cmd_scan_request {
u8 timeout; /* In seconds */
u8 reserved;
struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
-} __attribute__ ((packed));
+} __packed;
#define UMAC_CIPHER_TYPE_NONE 0xFF
#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
@@ -357,7 +357,7 @@ struct iwm_umac_security {
u8 ucast_cipher;
u8 mcast_cipher;
u8 flags;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_ibss {
u8 beacon_interval; /* in millisecond */
@@ -366,7 +366,7 @@ struct iwm_umac_ibss {
u8 band;
u8 channel;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
#define UMAC_MODE_BSS 0
#define UMAC_MODE_IBSS 1
@@ -385,13 +385,13 @@ struct iwm_umac_profile {
__le16 flags;
u8 wireless_mode;
u8 bss_num;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_invalidate_profile {
struct iwm_umac_wifi_if hdr;
u8 reason;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
/* Encryption key commands */
struct iwm_umac_key_wep40 {
@@ -400,7 +400,7 @@ struct iwm_umac_key_wep40 {
u8 key[WLAN_KEY_LEN_WEP40];
u8 static_key;
u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_key_wep104 {
struct iwm_umac_wifi_if hdr;
@@ -408,7 +408,7 @@ struct iwm_umac_key_wep104 {
u8 key[WLAN_KEY_LEN_WEP104];
u8 static_key;
u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
#define IWM_TKIP_KEY_SIZE 16
#define IWM_TKIP_MIC_SIZE 8
@@ -420,7 +420,7 @@ struct iwm_umac_key_tkip {
u8 tkip_key[IWM_TKIP_KEY_SIZE];
u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_key_ccmp {
struct iwm_umac_wifi_if hdr;
@@ -428,27 +428,27 @@ struct iwm_umac_key_ccmp {
u8 iv_count[6];
u8 reserved[2];
u8 key[WLAN_KEY_LEN_CCMP];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_key_remove {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_tx_key_id {
struct iwm_umac_wifi_if hdr;
u8 key_idx;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_pwr_trigger {
struct iwm_umac_wifi_if hdr;
__le32 reseved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_stats_req {
__le32 flags;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_cmd_stop_resume_tx {
u8 flags;
@@ -456,7 +456,7 @@ struct iwm_umac_cmd_stop_resume_tx {
__le16 stop_resume_tid_msk;
__le16 last_seq_num[IWM_UMAC_TID_NR];
u16 reserved;
-} __attribute__ ((packed));
+} __packed;
#define IWM_CMD_PMKID_ADD 1
#define IWM_CMD_PMKID_DEL 2
@@ -468,7 +468,7 @@ struct iwm_umac_pmkid_update {
u8 bssid[ETH_ALEN];
__le16 reserved;
u8 pmkid[WLAN_PMKID_LEN];
-} __attribute__ ((packed));
+} __packed;
/* LMAC commands */
int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 9531b18cf72a..907ac890997c 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -54,7 +54,7 @@
* LMAC. If you look at LMAC commands you'll see that they
* are actually regular iwlwifi target commands encapsulated
* into a special UMAC command called UMAC passthrough.
- * This is due to the fact the the host talks exclusively
+ * This is due to the fact the host talks exclusively
* to the UMAC and so there needs to be a special UMAC
* command for talking to the LMAC.
* This is how a wifi command is laid out:
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 13266c3842f8..51d7efa15ae6 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -162,7 +162,7 @@ struct iwm_umac_key_hdr {
u8 mac[ETH_ALEN];
u8 key_idx;
u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
-} __attribute__ ((packed));
+} __packed;
struct iwm_key {
struct iwm_umac_key_hdr hdr;
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a855a99e49b8..5ddcdf8c70c0 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -43,7 +43,7 @@ struct iwm_lmac_hdr {
u8 id;
u8 flags;
__le16 seq_num;
-} __attribute__ ((packed));
+} __packed;
/* LMAC commands */
#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
@@ -54,23 +54,23 @@ struct iwm_lmac_cal_cfg_elt {
__le32 send_res; /* 1 for sending back results */
__le32 apply_res; /* 1 for applying calibration results to HW */
__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_lmac_cal_cfg_status {
struct iwm_lmac_cal_cfg_elt init;
struct iwm_lmac_cal_cfg_elt periodic;
__le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
-} __attribute__ ((packed));
+} __packed;
struct iwm_lmac_cal_cfg_cmd {
struct iwm_lmac_cal_cfg_status ucode_cfg;
struct iwm_lmac_cal_cfg_status driver_cfg;
__le32 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_lmac_cal_cfg_resp {
__le32 status;
-} __attribute__ ((packed));
+} __packed;
#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
#define IWM_CARD_STATE_HW_DISABLED 0x01
@@ -80,7 +80,7 @@ struct iwm_lmac_cal_cfg_resp {
struct iwm_lmac_card_state {
__le32 flags;
-} __attribute__ ((packed));
+} __packed;
/**
* COEX_PRIORITY_TABLE_CMD
@@ -131,7 +131,7 @@ struct coex_event {
u8 win_med_prio;
u8 reserved;
u8 flags;
-} __attribute__ ((packed));
+} __packed;
#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
@@ -142,7 +142,7 @@ struct iwm_coex_prio_table_cmd {
u8 flags;
u8 reserved[3];
struct coex_event sta_prio[COEX_EVENTS_NUM];
-} __attribute__ ((packed));
+} __packed;
/* Coexistence definitions
*
@@ -192,7 +192,7 @@ struct iwm_ct_kill_cfg_cmd {
u32 exit_threshold;
u32 reserved;
u32 entry_threshold;
-} __attribute__ ((packed));
+} __packed;
/* LMAC OP CODES */
@@ -428,7 +428,7 @@ struct iwm_lmac_calib_hdr {
u8 first_grp;
u8 grp_num;
u8 all_data_valid;
-} __attribute__ ((packed));
+} __packed;
#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
#define IWM_CALIB_FREQ_GROUPS_NR 5
@@ -437,20 +437,20 @@ struct iwm_lmac_calib_hdr {
struct iwm_calib_rxiq_entry {
u16 ptam_postdist_ars;
u16 ptam_postdist_arc;
-} __attribute__ ((packed));
+} __packed;
struct iwm_calib_rxiq_group {
struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
-} __attribute__ ((packed));
+} __packed;
struct iwm_lmac_calib_rxiq {
struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
-} __attribute__ ((packed));
+} __packed;
struct iwm_calib_rxiq {
struct iwm_lmac_calib_hdr hdr;
struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
-} __attribute__ ((packed));
+} __packed;
#define LMAC_STA_ID_SEED 0x0f
#define LMAC_STA_ID_POS 0
@@ -463,7 +463,7 @@ struct iwm_lmac_power_report {
u8 pa_integ_res_A[3];
u8 pa_integ_res_B[3];
u8 pa_integ_res_C[3];
-} __attribute__ ((packed));
+} __packed;
struct iwm_lmac_tx_resp {
u8 frame_cnt; /* 1 - no aggregation, greater than 1 - aggregation */
@@ -479,6 +479,6 @@ struct iwm_lmac_tx_resp {
u8 ra_tid;
__le16 frame_ctl;
__le32 status;
-} __attribute__ ((packed));
+} __packed;
#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index e1184deca559..c02fcedea9fa 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -321,14 +321,14 @@ iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
return ERR_PTR(-ENOMEM);
}
- ticket_node->ticket = kzalloc(sizeof(struct iwm_rx_ticket), GFP_KERNEL);
+ ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
+ GFP_KERNEL);
if (!ticket_node->ticket) {
IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
kfree(ticket_node);
return ERR_PTR(-ENOMEM);
}
- memcpy(ticket_node->ticket, ticket, sizeof(struct iwm_rx_ticket));
INIT_LIST_HEAD(&ticket_node->node);
return ticket_node;
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 0cbba3ecc813..4a137d334a42 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -42,19 +42,19 @@
struct iwm_udma_in_hdr {
__le32 cmd;
__le32 size;
-} __attribute__ ((packed));
+} __packed;
struct iwm_udma_out_nonwifi_hdr {
__le32 cmd;
__le32 addr;
__le32 op1_sz;
__le32 op2;
-} __attribute__ ((packed));
+} __packed;
struct iwm_udma_out_wifi_hdr {
__le32 cmd;
__le32 meta_data;
-} __attribute__ ((packed));
+} __packed;
/* Sequence numbering */
#define UMAC_WIFI_SEQ_NUM_BASE 1
@@ -408,12 +408,12 @@ struct iwm_rx_ticket {
__le16 flags;
u8 payload_offset; /* includes: MAC header, pad, IV */
u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
-} __attribute__ ((packed));
+} __packed;
struct iwm_rx_mpdu_hdr {
__le16 len;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/* UMAC SW WIFI API */
@@ -421,31 +421,31 @@ struct iwm_dev_cmd_hdr {
u8 cmd;
u8 flags;
__le16 seq_num;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_fw_cmd_hdr {
__le32 meta_data;
struct iwm_dev_cmd_hdr cmd;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_wifi_out_hdr {
struct iwm_udma_out_wifi_hdr hw_hdr;
struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_nonwifi_out_hdr {
struct iwm_udma_out_nonwifi_hdr hw_hdr;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_wifi_in_hdr {
struct iwm_udma_in_hdr hw_hdr;
struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_nonwifi_in_hdr {
struct iwm_udma_in_hdr hw_hdr;
__le32 time_stamp;
-} __attribute__ ((packed));
+} __packed;
#define IWM_UMAC_PAGE_SIZE 0x200
@@ -521,7 +521,7 @@ struct iwm_umac_notif_wifi_if {
u8 status;
u8 flags;
__le16 buf_size;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
@@ -535,7 +535,7 @@ struct iwm_umac_notif_assoc_start {
__le32 roam_reason;
u8 bssid[ETH_ALEN];
u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
@@ -546,7 +546,7 @@ struct iwm_umac_notif_assoc_complete {
u8 bssid[ETH_ALEN];
u8 band;
u8 channel;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
@@ -556,7 +556,7 @@ struct iwm_umac_notif_assoc_complete {
struct iwm_umac_notif_profile_invalidate {
struct iwm_umac_notif_wifi_if mlme_hdr;
__le32 reason;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_SCAN_RESULT_SUCCESS 0x0
#define UMAC_SCAN_RESULT_ABORTED 0x1
@@ -568,7 +568,7 @@ struct iwm_umac_notif_scan_complete {
__le32 type;
__le32 result;
u8 seq_num;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_OPCODE_ADD_MODIFY 0x0
#define UMAC_OPCODE_REMOVE 0x1
@@ -582,7 +582,7 @@ struct iwm_umac_notif_sta_info {
u8 mac_addr[ETH_ALEN];
u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
u8 flags;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_BAND_2GHZ 0
#define UMAC_BAND_5GHZ 1
@@ -601,7 +601,7 @@ struct iwm_umac_notif_bss_info {
s8 rssi;
u8 reserved;
u8 frame_buf[1];
-} __attribute__ ((packed));
+} __packed;
#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
@@ -614,13 +614,13 @@ struct iwm_umac_notif_bss_removed {
struct iwm_umac_notif_wifi_if mlme_hdr;
__le32 count;
__le16 entries[0];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_mgt_frame {
struct iwm_umac_notif_wifi_if mlme_hdr;
__le16 len;
u8 frame[1];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_alive {
struct iwm_umac_wifi_in_hdr hdr;
@@ -630,13 +630,13 @@ struct iwm_umac_notif_alive {
__le16 reserved2;
__le16 page_grp_count;
__le32 page_grp_state[IWM_MACS_OUT_GROUPS];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_init_complete {
struct iwm_umac_wifi_in_hdr hdr;
__le16 status;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
/* error categories */
enum {
@@ -667,12 +667,12 @@ struct iwm_fw_error_hdr {
__le32 dbm_buf_end;
__le32 dbm_buf_write_ptr;
__le32 dbm_buf_cycle_cnt;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_error {
struct iwm_umac_wifi_in_hdr hdr;
struct iwm_fw_error_hdr err;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
@@ -687,20 +687,20 @@ struct iwm_umac_notif_page_dealloc {
struct iwm_umac_wifi_in_hdr hdr;
__le32 changes;
__le32 grp_info[IWM_MACS_OUT_GROUPS];
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_wifi_status {
struct iwm_umac_wifi_in_hdr hdr;
__le16 status;
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct iwm_umac_notif_rx_ticket {
struct iwm_umac_wifi_in_hdr hdr;
u8 num_tickets;
u8 reserved[3];
struct iwm_rx_ticket tickets[1];
-} __attribute__ ((packed));
+} __packed;
/* Tx/Rx rates window (number of max of last update window per second) */
#define UMAC_NTF_RATE_SAMPLE_NR 4
@@ -758,7 +758,7 @@ struct iwm_umac_notif_stats {
__le32 roam_unassoc;
__le32 roam_deauth;
__le32 roam_ap_loadblance;
-} __attribute__ ((packed));
+} __packed;
#define UMAC_STOP_TX_FLAG 0x1
#define UMAC_RESUME_TX_FLAG 0x2
@@ -770,7 +770,7 @@ struct iwm_umac_notif_stop_resume_tx {
u8 flags; /* UMAC_*_TX_FLAG_* */
u8 sta_id;
__le16 stop_resume_tid_msk; /* tid bitmask */
-} __attribute__ ((packed));
+} __packed;
#define UMAC_MAX_NUM_PMKIDS 4
@@ -779,7 +779,7 @@ struct iwm_umac_wifi_if {
u8 oid;
u8 flags;
__le16 buf_size;
-} __attribute__ ((packed));
+} __packed;
#define IWM_SEQ_NUM_HOST_MSK 0x0000
#define IWM_SEQ_NUM_UMAC_MSK 0x4000
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index cdb9b9650d73..0fa6b0e59ea5 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -70,6 +70,8 @@ static u8 is_command_allowed_in_ps(u16 cmd)
switch (cmd) {
case CMD_802_11_RSSI:
return 1;
+ case CMD_802_11_HOST_SLEEP_CFG:
+ return 1;
default:
break;
}
@@ -185,6 +187,23 @@ out:
return ret;
}
+static int lbs_ret_host_sleep_cfg(struct lbs_private *priv, unsigned long dummy,
+ struct cmd_header *resp)
+{
+ lbs_deb_enter(LBS_DEB_CMD);
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ priv->is_host_sleep_configured = 0;
+ if (priv->psstate == PS_STATE_FULL_POWER) {
+ priv->is_host_sleep_activated = 0;
+ wake_up_interruptible(&priv->host_sleep_q);
+ }
+ } else {
+ priv->is_host_sleep_configured = 1;
+ }
+ lbs_deb_leave(LBS_DEB_CMD);
+ return 0;
+}
+
int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
struct wol_config *p_wol_config)
{
@@ -202,12 +221,11 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
else
cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE;
- ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config);
+ ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config.hdr,
+ le16_to_cpu(cmd_config.hdr.size),
+ lbs_ret_host_sleep_cfg, 0);
if (!ret) {
- if (criteria) {
- lbs_deb_cmd("Set WOL criteria to %x\n", criteria);
- priv->wol_criteria = criteria;
- } else
+ if (p_wol_config)
memcpy((uint8_t *) p_wol_config,
(uint8_t *)&cmd_config.wol_conf,
sizeof(struct wol_config));
@@ -712,6 +730,10 @@ static void lbs_queue_cmd(struct lbs_private *priv,
}
}
+ if (le16_to_cpu(cmdnode->cmdbuf->command) ==
+ CMD_802_11_WAKEUP_CONFIRM)
+ addtail = 0;
+
spin_lock_irqsave(&priv->driver_lock, flags);
if (addtail)
@@ -1353,6 +1375,11 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
/* We don't get a response on the sleep-confirmation */
priv->dnld_sent = DNLD_RES_RECEIVED;
+ if (priv->is_host_sleep_configured) {
+ priv->is_host_sleep_activated = 1;
+ wake_up_interruptible(&priv->host_sleep_q);
+ }
+
/* If nothing to do, go back to sleep (?) */
if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
priv->psstate = PS_STATE_SLEEP;
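Note: the new lbs_ret_host_sleep_cfg() callback and the hook in lbs_send_confirmsleep() maintain is_host_sleep_configured / is_host_sleep_activated and signal host_sleep_q. The waiter is not shown in this hunk; a plausible sketch of how a suspend path might block until the firmware has actually entered host sleep (an assumption, not code from this patch):

	if (priv->is_host_sleep_configured)
		wait_event_interruptible(priv->host_sleep_q,
					 priv->is_host_sleep_activated);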
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 88f7131d66e9..d6c306353640 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -17,6 +17,7 @@
#include "dev.h"
#include "assoc.h"
#include "wext.h"
+#include "cmd.h"
/**
* @brief This function handles the disconnect event. It
@@ -341,32 +342,10 @@ done:
return ret;
}
-static int lbs_send_confirmwake(struct lbs_private *priv)
-{
- struct cmd_header cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_HOST);
-
- cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM);
- cmd.size = cpu_to_le16(sizeof(cmd));
- cmd.seqnum = cpu_to_le16(++priv->seqnum);
- cmd.result = 0;
-
- lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd,
- sizeof(cmd));
-
- ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd));
- if (ret)
- lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n");
-
- lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
- return ret;
-}
-
int lbs_process_event(struct lbs_private *priv, u32 event)
{
int ret = 0;
+ struct cmd_header cmd;
lbs_deb_enter(LBS_DEB_CMD);
@@ -410,7 +389,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
if (priv->reset_deep_sleep_wakeup)
priv->reset_deep_sleep_wakeup(priv);
priv->is_deep_sleep = 0;
- lbs_send_confirmwake(priv);
+ lbs_cmd_async(priv, CMD_802_11_WAKEUP_CONFIRM, &cmd,
+ sizeof(cmd));
+ priv->is_host_sleep_activated = 0;
+ wake_up_interruptible(&priv->host_sleep_q);
break;
case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 709ffcad22ad..61db8bc62b3c 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -38,7 +38,7 @@ int lbs_set_mac_address(struct net_device *dev, void *addr);
void lbs_set_multicast_list(struct net_device *dev);
int lbs_suspend(struct lbs_private *priv);
-void lbs_resume(struct lbs_private *priv);
+int lbs_resume(struct lbs_private *priv);
void lbs_queue_event(struct lbs_private *priv, u32 event);
void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index a54880e4ad2b..71c5ad46ebf6 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -75,6 +75,7 @@ struct lbs_private {
/* Deep sleep */
int is_deep_sleep;
+ int deep_sleep_required;
int is_auto_deep_sleep_enabled;
int wakeup_dev_required;
int is_activity_detected;
@@ -82,6 +83,11 @@ struct lbs_private {
wait_queue_head_t ds_awake_q;
struct timer_list auto_deepsleep_timer;
+ /* Host sleep */
+ int is_host_sleep_configured;
+ int is_host_sleep_activated;
+ wait_queue_head_t host_sleep_q;
+
/* Hardware access */
void *card;
u8 fw_ready;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 3804a58d7f4e..0cf31bbf6567 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -69,14 +69,11 @@ static void lbs_ethtool_get_wol(struct net_device *dev,
{
struct lbs_private *priv = dev->ml_priv;
- if (priv->wol_criteria == 0xffffffff) {
- /* Interface driver didn't configure wake */
- wol->supported = wol->wolopts = 0;
- return;
- }
-
wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP)
+ return;
+
if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA)
wol->wolopts |= WAKE_UCAST;
if (priv->wol_criteria & EHS_WAKE_ON_MULTICAST_DATA)
@@ -91,23 +88,22 @@ static int lbs_ethtool_set_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct lbs_private *priv = dev->ml_priv;
- uint32_t criteria = 0;
if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
return -EOPNOTSUPP;
+ priv->wol_criteria = 0;
if (wol->wolopts & WAKE_UCAST)
- criteria |= EHS_WAKE_ON_UNICAST_DATA;
+ priv->wol_criteria |= EHS_WAKE_ON_UNICAST_DATA;
if (wol->wolopts & WAKE_MCAST)
- criteria |= EHS_WAKE_ON_MULTICAST_DATA;
+ priv->wol_criteria |= EHS_WAKE_ON_MULTICAST_DATA;
if (wol->wolopts & WAKE_BCAST)
- criteria |= EHS_WAKE_ON_BROADCAST_DATA;
+ priv->wol_criteria |= EHS_WAKE_ON_BROADCAST_DATA;
if (wol->wolopts & WAKE_PHY)
- criteria |= EHS_WAKE_ON_MAC_EVENT;
+ priv->wol_criteria |= EHS_WAKE_ON_MAC_EVENT;
if (wol->wolopts == 0)
- criteria |= EHS_REMOVE_WAKEUP;
-
- return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
+ priv->wol_criteria |= EHS_REMOVE_WAKEUP;
+ return 0;
}
const struct ethtool_ops lbs_ethtool_ops = {
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3809c0b49464..3bd5d3b6037a 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -326,7 +326,7 @@ struct txpd {
u8 pktdelay_2ms;
/* reserved */
u8 reserved1;
-} __attribute__ ((packed));
+} __packed;
/* RxPD Descriptor */
struct rxpd {
@@ -339,8 +339,8 @@ struct rxpd {
u8 bss_type;
/* BSS number */
u8 bss_num;
- } __attribute__ ((packed)) bss;
- } __attribute__ ((packed)) u;
+ } __packed bss;
+ } __packed u;
/* SNR */
u8 snr;
@@ -366,14 +366,14 @@ struct rxpd {
/* Pkt Priority */
u8 priority;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct cmd_header {
__le16 command;
__le16 size;
__le16 seqnum;
__le16 result;
-} __attribute__ ((packed));
+} __packed;
/* Generic structure to hold all key types. */
struct enc_key {
@@ -387,7 +387,7 @@ struct enc_key {
struct lbs_offset_value {
u32 offset;
u32 value;
-} __attribute__ ((packed));
+} __packed;
/*
* Define data structure for CMD_GET_HW_SPEC
@@ -426,7 +426,7 @@ struct cmd_ds_get_hw_spec {
/*FW/HW capability */
__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_subscribe_event {
struct cmd_header hdr;
@@ -440,7 +440,7 @@ struct cmd_ds_802_11_subscribe_event {
* bump this up a bit.
*/
uint8_t tlv[128];
-} __attribute__ ((packed));
+} __packed;
/*
* This scan handle Country Information IE(802.11d compliant)
@@ -452,7 +452,7 @@ struct cmd_ds_802_11_scan {
uint8_t bsstype;
uint8_t bssid[ETH_ALEN];
uint8_t tlvbuffer[0];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_scan_rsp {
struct cmd_header hdr;
@@ -460,7 +460,7 @@ struct cmd_ds_802_11_scan_rsp {
__le16 bssdescriptsize;
uint8_t nr_sets;
uint8_t bssdesc_and_tlvbuffer[0];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_get_log {
struct cmd_header hdr;
@@ -478,20 +478,20 @@ struct cmd_ds_802_11_get_log {
__le32 fcserror;
__le32 txframe;
__le32 wepundecryptable;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mac_control {
struct cmd_header hdr;
__le16 action;
u16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mac_multicast_adr {
struct cmd_header hdr;
__le16 action;
__le16 nr_of_adrs;
u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_authenticate {
struct cmd_header hdr;
@@ -499,14 +499,14 @@ struct cmd_ds_802_11_authenticate {
u8 bssid[ETH_ALEN];
u8 authtype;
u8 reserved[10];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_deauthenticate {
struct cmd_header hdr;
u8 macaddr[ETH_ALEN];
__le16 reasoncode;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_associate {
struct cmd_header hdr;
@@ -517,7 +517,7 @@ struct cmd_ds_802_11_associate {
__le16 bcnperiod;
u8 dtimperiod;
u8 iebuf[512]; /* Enough for required and most optional IEs */
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_associate_response {
struct cmd_header hdr;
@@ -526,7 +526,7 @@ struct cmd_ds_802_11_associate_response {
__le16 statuscode;
__le16 aid;
u8 iebuf[512];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_set_wep {
struct cmd_header hdr;
@@ -540,7 +540,7 @@ struct cmd_ds_802_11_set_wep {
/* 40, 128bit or TXWEP */
uint8_t keytype[4];
uint8_t keymaterial[4][16];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_snmp_mib {
struct cmd_header hdr;
@@ -549,40 +549,40 @@ struct cmd_ds_802_11_snmp_mib {
__le16 oid;
__le16 bufsize;
u8 value[128];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mac_reg_access {
__le16 action;
__le16 offset;
__le32 value;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_bbp_reg_access {
__le16 action;
__le16 offset;
u8 value;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_rf_reg_access {
__le16 action;
__le16 offset;
u8 value;
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_radio_control {
struct cmd_header hdr;
__le16 action;
__le16 control;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_beacon_control {
__le16 action;
__le16 beacon_enable;
__le16 beacon_period;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_sleep_params {
struct cmd_header hdr;
@@ -607,7 +607,7 @@ struct cmd_ds_802_11_sleep_params {
/* reserved field, should be set to zero */
__le16 reserved;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_rf_channel {
struct cmd_header hdr;
@@ -617,7 +617,7 @@ struct cmd_ds_802_11_rf_channel {
__le16 rftype; /* unused */
__le16 reserved; /* unused */
u8 channellist[32]; /* unused */
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_rssi {
/* weighting factor */
@@ -626,21 +626,21 @@ struct cmd_ds_802_11_rssi {
__le16 reserved_0;
__le16 reserved_1;
__le16 reserved_2;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_rssi_rsp {
__le16 SNR;
__le16 noisefloor;
__le16 avgSNR;
__le16 avgnoisefloor;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_mac_address {
struct cmd_header hdr;
__le16 action;
u8 macadd[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_rf_tx_power {
struct cmd_header hdr;
@@ -649,26 +649,26 @@ struct cmd_ds_802_11_rf_tx_power {
__le16 curlevel;
s8 maxlevel;
s8 minlevel;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_monitor_mode {
__le16 action;
__le16 mode;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_set_boot2_ver {
struct cmd_header hdr;
__le16 action;
__le16 version;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_fw_wake_method {
struct cmd_header hdr;
__le16 action;
__le16 method;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_ps_mode {
__le16 action;
@@ -676,7 +676,7 @@ struct cmd_ds_802_11_ps_mode {
__le16 multipledtim;
__le16 reserved;
__le16 locallisteninterval;
-} __attribute__ ((packed));
+} __packed;
struct cmd_confirm_sleep {
struct cmd_header hdr;
@@ -686,7 +686,7 @@ struct cmd_confirm_sleep {
__le16 multipledtim;
__le16 reserved;
__le16 locallisteninterval;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_data_rate {
struct cmd_header hdr;
@@ -694,14 +694,14 @@ struct cmd_ds_802_11_data_rate {
__le16 action;
__le16 reserved;
u8 rates[MAX_RATES];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_rate_adapt_rateset {
struct cmd_header hdr;
__le16 action;
__le16 enablehwauto;
__le16 bitmap;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_ad_hoc_start {
struct cmd_header hdr;
@@ -718,14 +718,14 @@ struct cmd_ds_802_11_ad_hoc_start {
__le16 capability;
u8 rates[MAX_RATES];
u8 tlv_memory_size_pad[100];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_ad_hoc_result {
struct cmd_header hdr;
u8 pad[3];
u8 bssid[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct adhoc_bssdesc {
u8 bssid[ETH_ALEN];
@@ -746,7 +746,7 @@ struct adhoc_bssdesc {
* Adhoc join command and will cause a binary layout mismatch with
* the firmware
*/
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_ad_hoc_join {
struct cmd_header hdr;
@@ -754,18 +754,18 @@ struct cmd_ds_802_11_ad_hoc_join {
struct adhoc_bssdesc bss;
__le16 failtimeout; /* Reserved on v9 and later */
__le16 probedelay; /* Reserved on v9 and later */
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_ad_hoc_stop {
struct cmd_header hdr;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_enable_rsn {
struct cmd_header hdr;
__le16 action;
__le16 enable;
-} __attribute__ ((packed));
+} __packed;
struct MrvlIEtype_keyParamSet {
/* type ID */
@@ -785,7 +785,7 @@ struct MrvlIEtype_keyParamSet {
/* key material of size keylen */
u8 key[32];
-} __attribute__ ((packed));
+} __packed;
#define MAX_WOL_RULES 16
@@ -797,7 +797,7 @@ struct host_wol_rule {
__le16 reserve;
__be32 sig_mask;
__be32 signature;
-} __attribute__ ((packed));
+} __packed;
struct wol_config {
uint8_t action;
@@ -805,7 +805,7 @@ struct wol_config {
uint8_t no_rules_in_cmd;
uint8_t result;
struct host_wol_rule rule[MAX_WOL_RULES];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_host_sleep {
struct cmd_header hdr;
@@ -813,7 +813,7 @@ struct cmd_ds_host_sleep {
uint8_t gpio;
uint16_t gap;
struct wol_config wol_conf;
-} __attribute__ ((packed));
+} __packed;
@@ -822,7 +822,7 @@ struct cmd_ds_802_11_key_material {
__le16 action;
struct MrvlIEtype_keyParamSet keyParamSet[2];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_eeprom_access {
struct cmd_header hdr;
@@ -832,7 +832,7 @@ struct cmd_ds_802_11_eeprom_access {
/* firmware says it returns a maximum of 20 bytes */
#define LBS_EEPROM_READ_LEN 20
u8 value[LBS_EEPROM_READ_LEN];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_tpc_cfg {
struct cmd_header hdr;
@@ -843,7 +843,7 @@ struct cmd_ds_802_11_tpc_cfg {
int8_t P1;
int8_t P2;
uint8_t usesnr;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_pa_cfg {
@@ -854,14 +854,14 @@ struct cmd_ds_802_11_pa_cfg {
int8_t P0;
int8_t P1;
int8_t P2;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_led_ctrl {
__le16 action;
__le16 numled;
u8 data[256];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_802_11_afc {
__le16 afc_auto;
@@ -875,22 +875,22 @@ struct cmd_ds_802_11_afc {
__le16 carrier_offset; /* signed */
};
};
-} __attribute__ ((packed));
+} __packed;
struct cmd_tx_rate_query {
__le16 txrate;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_get_tsf {
__le64 tsfvalue;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_bt_access {
__le16 action;
__le32 id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_fwt_access {
__le16 action;
@@ -910,7 +910,7 @@ struct cmd_ds_fwt_access {
__le32 snr;
__le32 references;
u8 prec[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mesh_config {
struct cmd_header hdr;
@@ -920,14 +920,14 @@ struct cmd_ds_mesh_config {
__le16 type;
__le16 length;
u8 data[128]; /* last position reserved */
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mesh_access {
struct cmd_header hdr;
__le16 action;
__le32 data[32]; /* last position reserved */
-} __attribute__ ((packed));
+} __packed;
/* Number of stats counters returned by the firmware */
#define MESH_STATS_NUM 8
@@ -957,6 +957,6 @@ struct cmd_ds_command {
struct cmd_ds_fwt_access fwt;
struct cmd_ds_802_11_beacon_control bcn_ctrl;
} params;
-} __attribute__ ((packed));
+} __packed;
#endif
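[Editor's note] The long run of mechanical changes above swaps the spelled-out GCC attribute for the kernel's __packed shorthand. As a quick illustration (the macro itself lives in the compiler headers; the struct names below are made up for the example), the two spellings produce identical layout:

	#include <linux/types.h>

	/* include/linux/compiler-gcc.h provides, roughly: */
	#define __packed __attribute__((packed))

	/* so these two hypothetical structs are laid out identically: */
	struct example_hdr_old {
		__le16 command;
		__le16 size;
	} __attribute__ ((packed));

	struct example_hdr_new {
		__le16 command;
		__le16 size;
	} __packed;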
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 64dd345d30f5..6e71346a7550 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1182,11 +1182,69 @@ static void if_sdio_remove(struct sdio_func *func)
lbs_deb_leave(LBS_DEB_SDIO);
}
+static int if_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret;
+ struct if_sdio_card *card = sdio_get_drvdata(func);
+
+ mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
+
+ lbs_pr_info("%s: suspend: PM flags = 0x%x\n",
+ sdio_func_id(func), flags);
+
+ /* If we aren't being asked to wake on anything, we should bail out
+ * and let the SD stack power down the card.
+ */
+ if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ lbs_pr_info("Suspend without wake params -- "
+ "powering down card.");
+ return -ENOSYS;
+ }
+
+ if (!(flags & MMC_PM_KEEP_POWER)) {
+ lbs_pr_err("%s: cannot remain alive while host is suspended\n",
+ sdio_func_id(func));
+ return -ENOSYS;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ return ret;
+
+ ret = lbs_suspend(card->priv);
+ if (ret)
+ return ret;
+
+ return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+}
+
+static int if_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct if_sdio_card *card = sdio_get_drvdata(func);
+ int ret;
+
+ lbs_pr_info("%s: resume: we're back\n", sdio_func_id(func));
+
+ ret = lbs_resume(card->priv);
+
+ return ret;
+}
+
+static const struct dev_pm_ops if_sdio_pm_ops = {
+ .suspend = if_sdio_suspend,
+ .resume = if_sdio_resume,
+};
+
static struct sdio_driver if_sdio_driver = {
.name = "libertas_sdio",
.id_table = if_sdio_ids,
.probe = if_sdio_probe,
.remove = if_sdio_remove,
+ .drv = {
+ .pm = &if_sdio_pm_ops,
+ },
};
/*******************************************************************/
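[Editor's note] For readers unfamiliar with the .drv.pm hookup used above: struct sdio_driver has no suspend/resume members of its own, so the handlers are attached through the embedded struct device_driver. A minimal sketch of the same pattern follows; the driver and function names are hypothetical, and only the keep-power check is shown:

	#include <linux/pm.h>
	#include <linux/mmc/sdio_func.h>

	static int foo_sdio_suspend(struct device *dev)
	{
		struct sdio_func *func = dev_to_sdio_func(dev);

		/* Only stay alive across suspend if the host can keep us powered. */
		if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
			return -ENOSYS;

		return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	}

	static int foo_sdio_resume(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops foo_sdio_pm_ops = {
		.suspend = foo_sdio_suspend,
		.resume  = foo_sdio_resume,
	};

	static struct sdio_driver foo_sdio_driver = {
		.name = "foo_sdio",
		.drv  = {
			.pm = &foo_sdio_pm_ops,
		},
	};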
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index f41594c7ac16..3678e532874f 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -613,16 +613,14 @@ static void if_usb_receive_fwload(struct urb *urb)
return;
}
- syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+ syncfwheader = kmemdup(skb->data + IPFIELD_ALIGN_OFFSET,
+ sizeof(struct fwsyncheader), GFP_ATOMIC);
if (!syncfwheader) {
lbs_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
kfree_skb(skb);
return;
}
- memcpy(syncfwheader, skb->data + IPFIELD_ALIGN_OFFSET,
- sizeof(struct fwsyncheader));
-
if (!syncfwheader->cmd) {
lbs_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
lbs_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
@@ -1043,6 +1041,12 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (priv->psstate != PS_STATE_FULL_POWER)
return -1;
+ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ lbs_pr_info("Suspend attempt without "
+ "configuring wake params!\n");
+ return -ENOSYS;
+ }
+
ret = lbs_suspend(priv);
if (ret)
goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index d9b8ee130c45..abfecc4814b4 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -625,16 +625,13 @@ static int lbs_thread(void *data)
return 0;
}
-static int lbs_suspend_callback(struct lbs_private *priv, unsigned long dummy,
- struct cmd_header *cmd)
+static int lbs_ret_host_sleep_activate(struct lbs_private *priv,
+ unsigned long dummy,
+ struct cmd_header *cmd)
{
lbs_deb_enter(LBS_DEB_FW);
-
- netif_device_detach(priv->dev);
- if (priv->mesh_dev)
- netif_device_detach(priv->mesh_dev);
-
- priv->fw_ready = 0;
+ priv->is_host_sleep_activated = 1;
+ wake_up_interruptible(&priv->host_sleep_q);
lbs_deb_leave(LBS_DEB_FW);
return 0;
}
@@ -646,39 +643,65 @@ int lbs_suspend(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_FW);
- if (priv->wol_criteria == 0xffffffff) {
- lbs_pr_info("Suspend attempt without configuring wake params!\n");
- return -EINVAL;
+ if (priv->is_deep_sleep) {
+ ret = lbs_set_deep_sleep(priv, 0);
+ if (ret) {
+ lbs_pr_err("deep sleep cancellation failed: %d\n", ret);
+ return ret;
+ }
+ priv->deep_sleep_required = 1;
}
memset(&cmd, 0, sizeof(cmd));
+ ret = lbs_host_sleep_cfg(priv, priv->wol_criteria,
+ (struct wol_config *)NULL);
+ if (ret) {
+ lbs_pr_info("Host sleep configuration failed: %d\n", ret);
+ return ret;
+ }
+ if (priv->psstate == PS_STATE_FULL_POWER) {
+ ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd,
+ sizeof(cmd), lbs_ret_host_sleep_activate, 0);
+ if (ret)
+ lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret);
+ }
- ret = __lbs_cmd(priv, CMD_802_11_HOST_SLEEP_ACTIVATE, &cmd,
- sizeof(cmd), lbs_suspend_callback, 0);
- if (ret)
- lbs_pr_info("HOST_SLEEP_ACTIVATE failed: %d\n", ret);
+ if (!wait_event_interruptible_timeout(priv->host_sleep_q,
+ priv->is_host_sleep_activated, (10 * HZ))) {
+ lbs_pr_err("host_sleep_q: timer expired\n");
+ ret = -1;
+ }
+ netif_device_detach(priv->dev);
+ if (priv->mesh_dev)
+ netif_device_detach(priv->mesh_dev);
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_suspend);
-void lbs_resume(struct lbs_private *priv)
+int lbs_resume(struct lbs_private *priv)
{
- lbs_deb_enter(LBS_DEB_FW);
+ int ret;
+ uint32_t criteria = EHS_REMOVE_WAKEUP;
- priv->fw_ready = 1;
+ lbs_deb_enter(LBS_DEB_FW);
- /* Firmware doesn't seem to give us RX packets any more
- until we send it some command. Might as well update */
- lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
- 0, 0, NULL);
+ ret = lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
netif_device_attach(priv->dev);
if (priv->mesh_dev)
netif_device_attach(priv->mesh_dev);
- lbs_deb_leave(LBS_DEB_FW);
+ if (priv->deep_sleep_required) {
+ priv->deep_sleep_required = 0;
+ ret = lbs_set_deep_sleep(priv, 1);
+ if (ret)
+ lbs_pr_err("deep sleep activation failed: %d\n", ret);
+ }
+
+ lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(lbs_resume);
@@ -834,10 +857,13 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
priv->is_auto_deep_sleep_enabled = 0;
+ priv->deep_sleep_required = 0;
priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
priv->authtype_auto = 1;
-
+ priv->is_host_sleep_configured = 0;
+ priv->is_host_sleep_activated = 0;
+ init_waitqueue_head(&priv->host_sleep_q);
mutex_init(&priv->lock);
setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -976,6 +1002,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->wol_criteria = 0xffffffff;
priv->wol_gpio = 0xff;
+ priv->wol_gap = 20;
goto done;
@@ -1031,6 +1058,10 @@ void lbs_remove_card(struct lbs_private *priv)
wake_up_interruptible(&priv->ds_awake_q);
}
+ priv->is_host_sleep_configured = 0;
+ priv->is_host_sleep_activated = 0;
+ wake_up_interruptible(&priv->host_sleep_q);
+
/* Stop the thread servicing the interrupts */
priv->surpriseremoved = 1;
kthread_stop(priv->main_thread);
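[Editor's note] The reworked lbs_suspend() above now blocks on host_sleep_q until the HOST_SLEEP_ACTIVATE response (or the wakeup event) flips is_host_sleep_activated. Stripped of the driver specifics, the wait/complete pattern looks like the sketch below; the names are illustrative, not from the driver:

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct foo_ctx {
		wait_queue_head_t sleep_q;	/* init_waitqueue_head() at setup time */
		int sleep_activated;
	};

	/* Runs in the command-response path. */
	static void foo_sleep_cmd_done(struct foo_ctx *ctx)
	{
		ctx->sleep_activated = 1;
		wake_up_interruptible(&ctx->sleep_q);
	}

	/* Runs in the suspend path. */
	static int foo_wait_for_sleep(struct foo_ctx *ctx)
	{
		ctx->sleep_activated = 0;
		/* ...issue the asynchronous activate command here... */
		if (!wait_event_interruptible_timeout(ctx->sleep_q,
						      ctx->sleep_activated, 10 * HZ))
			return -ETIMEDOUT;	/* the driver above just returns -1 */
		return 0;
	}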
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index d16b26416e82..b3c8ea6d610e 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -6,7 +6,7 @@ struct tx_radiotap_hdr {
u8 txpower;
u8 rts_retries;
u8 data_retries;
-} __attribute__ ((packed));
+} __packed;
#define TX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_RATE) | \
@@ -34,7 +34,7 @@ struct rx_radiotap_hdr {
u8 flags;
u8 rate;
u8 antsignal;
-} __attribute__ ((packed));
+} __packed;
#define RX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 7a377f5b7662..1c63f8ce7349 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -15,7 +15,7 @@ struct eth803hdr {
u8 dest_addr[6];
u8 src_addr[6];
u16 h803_len;
-} __attribute__ ((packed));
+} __packed;
struct rfc1042hdr {
u8 llc_dsap;
@@ -23,17 +23,17 @@ struct rfc1042hdr {
u8 llc_ctrl;
u8 snap_oui[3];
u16 snap_type;
-} __attribute__ ((packed));
+} __packed;
struct rxpackethdr {
struct eth803hdr eth803_hdr;
struct rfc1042hdr rfc1042_hdr;
-} __attribute__ ((packed));
+} __packed;
struct rx80211packethdr {
struct rxpd rx_pd;
void *eth80211_hdr;
-} __attribute__ ((packed));
+} __packed;
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 24cd54b3a806..7d82f13bdf1d 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -666,7 +666,7 @@ void lbs_scan_worker(struct work_struct *work)
/**
* @brief Interpret a BSS scan response returned from the firmware
*
- * Parse the various fixed fields and IEs passed back for a a BSS probe
+ * Parse the various fixed fields and IEs passed back for a BSS probe
* response or beacon from the scan command. Record information as needed
* in the scan table struct bss_descriptor for that entry.
*
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 3e72c86ceca8..462fbb4cb743 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -11,7 +11,7 @@
struct ieee_ie_header {
u8 id;
u8 len;
-} __attribute__ ((packed));
+} __packed;
struct ieee_ie_cf_param_set {
struct ieee_ie_header header;
@@ -20,19 +20,19 @@ struct ieee_ie_cf_param_set {
u8 cfpperiod;
__le16 cfpmaxduration;
__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;
struct ieee_ie_ibss_param_set {
struct ieee_ie_header header;
__le16 atimwindow;
-} __attribute__ ((packed));
+} __packed;
union ieee_ss_param_set {
struct ieee_ie_cf_param_set cf;
struct ieee_ie_ibss_param_set ibss;
-} __attribute__ ((packed));
+} __packed;
struct ieee_ie_fh_param_set {
struct ieee_ie_header header;
@@ -41,18 +41,18 @@ struct ieee_ie_fh_param_set {
u8 hopset;
u8 hoppattern;
u8 hopindex;
-} __attribute__ ((packed));
+} __packed;
struct ieee_ie_ds_param_set {
struct ieee_ie_header header;
u8 channel;
-} __attribute__ ((packed));
+} __packed;
union ieee_phy_param_set {
struct ieee_ie_fh_param_set fh;
struct ieee_ie_ds_param_set ds;
-} __attribute__ ((packed));
+} __packed;
/** TLV type ID definition */
#define PROPRIETARY_TLV_BASE_ID 0x0100
@@ -100,28 +100,28 @@ union ieee_phy_param_set {
struct mrvl_ie_header {
__le16 type;
__le16 len;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_data {
struct mrvl_ie_header header;
u8 Data[1];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_rates_param_set {
struct mrvl_ie_header header;
u8 rates[1];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_ssid_param_set {
struct mrvl_ie_header header;
u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_wildcard_ssid_param_set {
struct mrvl_ie_header header;
u8 MaxSsidlength;
u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;
struct chanscanmode {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -133,7 +133,7 @@ struct chanscanmode {
u8 disablechanfilt:1;
u8 reserved_2_7:6;
#endif
-} __attribute__ ((packed));
+} __packed;
struct chanscanparamset {
u8 radiotype;
@@ -141,12 +141,12 @@ struct chanscanparamset {
struct chanscanmode chanscanmode;
__le16 minscantime;
__le16 maxscantime;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_chanlist_param_set {
struct mrvl_ie_header header;
struct chanscanparamset chanscanparam[1];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_cf_param_set {
struct mrvl_ie_header header;
@@ -154,86 +154,86 @@ struct mrvl_ie_cf_param_set {
u8 cfpperiod;
__le16 cfpmaxduration;
__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_ds_param_set {
struct mrvl_ie_header header;
u8 channel;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_rsn_param_set {
struct mrvl_ie_header header;
u8 rsnie[1];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_tsf_timestamp {
struct mrvl_ie_header header;
__le64 tsftable[1];
-} __attribute__ ((packed));
+} __packed;
/* v9 and later firmware only */
struct mrvl_ie_auth_type {
struct mrvl_ie_header header;
__le16 auth;
-} __attribute__ ((packed));
+} __packed;
/** Local Power capability */
struct mrvl_ie_power_capability {
struct mrvl_ie_header header;
s8 minpower;
s8 maxpower;
-} __attribute__ ((packed));
+} __packed;
/* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
struct mrvl_ie_thresholds {
struct mrvl_ie_header header;
u8 value;
u8 freq;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_beacons_missed {
struct mrvl_ie_header header;
u8 beaconmissed;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_num_probes {
struct mrvl_ie_header header;
__le16 numprobes;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_bcast_probe {
struct mrvl_ie_header header;
__le16 bcastprobe;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_num_ssid_probe {
struct mrvl_ie_header header;
__le16 numssidprobe;
-} __attribute__ ((packed));
+} __packed;
struct led_pin {
u8 led;
u8 pin;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_ledgpio {
struct mrvl_ie_header header;
struct led_pin ledpin[1];
-} __attribute__ ((packed));
+} __packed;
struct led_bhv {
uint8_t firmwarestate;
uint8_t led;
uint8_t ledstate;
uint8_t ledarg;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_ie_ledbhv {
struct mrvl_ie_header header;
struct led_bhv ledbhv[1];
-} __attribute__ ((packed));
+} __packed;
/* Meant to be packed as the value member of a struct ieee80211_info_element.
* Note that the len member of the ieee80211_info_element varies depending on
@@ -248,12 +248,12 @@ struct mrvl_meshie_val {
uint8_t mesh_capability;
uint8_t mesh_id_len;
uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
-} __attribute__ ((packed));
+} __packed;
struct mrvl_meshie {
u8 id, len;
struct mrvl_meshie_val val;
-} __attribute__ ((packed));
+} __packed;
struct mrvl_mesh_defaults {
__le32 bootflag;
@@ -261,6 +261,6 @@ struct mrvl_mesh_defaults {
uint8_t reserved;
__le16 channel;
struct mrvl_meshie meshie;
-} __attribute__ ((packed));
+} __packed;
#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index c445500ffc61..b172f5d87a3b 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -538,7 +538,8 @@ static void if_usb_receive_fwload(struct urb *urb)
return;
}
- syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
+ syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
+ GFP_ATOMIC);
if (!syncfwheader) {
lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
kfree_skb(skb);
@@ -546,8 +547,6 @@ static void if_usb_receive_fwload(struct urb *urb)
return;
}
- memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
-
if (!syncfwheader->cmd) {
lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index fbbaaae7a1ae..737eac92ef72 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -316,7 +316,7 @@ struct cmd_header {
__le16 size;
__le16 seqnum;
__le16 result;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ctrl_node {
struct list_head list;
@@ -369,7 +369,7 @@ struct cmd_ds_get_hw_spec {
/*FW/HW capability */
__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;
struct cmd_ds_mac_control {
struct cmd_header hdr;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6f8cb3ee6fed..49a7dfb4809a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -317,7 +317,7 @@ struct hwsim_radiotap_hdr {
u8 rt_rate;
__le16 rt_channel;
__le16 rt_chbitmask;
-} __attribute__ ((packed));
+} __packed;
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 808adb909095..c019fdc131c0 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -109,7 +109,7 @@ struct mwl8k_rx_queue {
dma_addr_t rxd_dma;
struct {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma)
+ DEFINE_DMA_UNMAP_ADDR(dma);
} *buf;
};
@@ -426,7 +426,7 @@ struct mwl8k_cmd_pkt {
__u8 macid;
__le16 result;
char payload[0];
-} __attribute__((packed));
+} __packed;
/*
* Firmware loading.
@@ -632,7 +632,7 @@ struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
char data[0];
-} __attribute__((packed));
+} __packed;
/* Routines to add/remove DMA header from skb. */
static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
@@ -711,7 +711,7 @@ struct mwl8k_rxd_8366_ap {
__u8 rx_status;
__u8 channel;
__u8 rx_ctrl;
-} __attribute__((packed));
+} __packed;
#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
@@ -806,7 +806,7 @@ struct mwl8k_rxd_sta {
__u8 rx_ctrl;
__u8 rx_status;
__u8 pad2[2];
-} __attribute__((packed));
+} __packed;
#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
@@ -963,7 +963,7 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
if (rxq->tail == MWL8K_RX_DESCS)
rxq->tail = 0;
rxq->buf[rx].skb = skb;
- pci_unmap_addr_set(&rxq->buf[rx], dma, addr);
+ dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
@@ -984,9 +984,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
for (i = 0; i < MWL8K_RX_DESCS; i++) {
if (rxq->buf[i].skb != NULL) {
pci_unmap_single(priv->pdev,
- pci_unmap_addr(&rxq->buf[i], dma),
+ dma_unmap_addr(&rxq->buf[i], dma),
MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&rxq->buf[i], dma, 0);
+ dma_unmap_addr_set(&rxq->buf[i], dma, 0);
kfree_skb(rxq->buf[i].skb);
rxq->buf[i].skb = NULL;
@@ -1060,9 +1060,9 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxq->buf[rxq->head].skb = NULL;
pci_unmap_single(priv->pdev,
- pci_unmap_addr(&rxq->buf[rxq->head], dma),
+ dma_unmap_addr(&rxq->buf[rxq->head], dma),
MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
+ dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
rxq->head++;
if (rxq->head == MWL8K_RX_DESCS)
@@ -1120,7 +1120,7 @@ struct mwl8k_tx_desc {
__le16 rate_info;
__u8 peer_id;
__u8 tx_frag_cnt;
-} __attribute__((packed));
+} __packed;
#define MWL8K_TX_DESCS 128
@@ -1666,7 +1666,7 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 caps2;
__le32 num_tx_desc_per_queue;
__le32 total_rxd;
-} __attribute__((packed));
+} __packed;
#define MWL8K_CAP_MAX_AMSDU 0x20000000
#define MWL8K_CAP_GREENFIELD 0x08000000
@@ -1810,7 +1810,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
__le32 wcbbase1;
__le32 wcbbase2;
__le32 wcbbase3;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
{
@@ -1883,7 +1883,7 @@ struct mwl8k_cmd_set_hw_spec {
__le32 flags;
__le32 num_tx_desc_per_queue;
__le32 total_rxd;
-} __attribute__((packed));
+} __packed;
#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
@@ -1985,7 +1985,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
-} __attribute__((packed));
+} __packed;
#define MWL8K_STAT_ACK_FAILURE 9
#define MWL8K_STAT_RTS_FAILURE 12
@@ -2029,7 +2029,7 @@ struct mwl8k_cmd_radio_control {
__le16 action;
__le16 control;
__le16 radio_on;
-} __attribute__((packed));
+} __packed;
static int
mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
@@ -2092,7 +2092,7 @@ struct mwl8k_cmd_rf_tx_power {
__le16 current_level;
__le16 reserved;
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
@@ -2121,7 +2121,7 @@ struct mwl8k_cmd_rf_antenna {
struct mwl8k_cmd_pkt header;
__le16 antenna;
__le16 mode;
-} __attribute__((packed));
+} __packed;
#define MWL8K_RF_ANTENNA_RX 1
#define MWL8K_RF_ANTENNA_TX 2
@@ -2182,7 +2182,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
*/
struct mwl8k_cmd_set_pre_scan {
struct mwl8k_cmd_pkt header;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
{
@@ -2209,7 +2209,7 @@ struct mwl8k_cmd_set_post_scan {
struct mwl8k_cmd_pkt header;
__le32 isibss;
__u8 bssid[ETH_ALEN];
-} __attribute__((packed));
+} __packed;
static int
mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
@@ -2240,7 +2240,7 @@ struct mwl8k_cmd_set_rf_channel {
__le16 action;
__u8 current_channel;
__le32 channel_flags;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
struct ieee80211_conf *conf)
@@ -2293,7 +2293,7 @@ struct mwl8k_cmd_update_set_aid {
__u8 bssid[ETH_ALEN];
__le16 protection_mode;
__u8 supp_rates[14];
-} __attribute__((packed));
+} __packed;
static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
@@ -2364,7 +2364,7 @@ struct mwl8k_cmd_set_rate {
/* Bitmap for supported MCS codes. */
__u8 mcs_set[16];
__u8 reserved[16];
-} __attribute__((packed));
+} __packed;
static int
mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2397,7 +2397,7 @@ struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
__le32 sleep_interval; /* Number of beacon periods to sleep */
__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
int framelen, int dtim)
@@ -2436,7 +2436,7 @@ struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 threshold;
-} __attribute__((packed));
+} __packed;
static int
mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
@@ -2466,7 +2466,7 @@ struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
__u8 short_slot;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
@@ -2528,7 +2528,7 @@ struct mwl8k_cmd_set_edca_params {
__u8 txq;
} sta;
};
-} __attribute__((packed));
+} __packed;
#define MWL8K_SET_EDCA_CW 0x01
#define MWL8K_SET_EDCA_TXOP 0x02
@@ -2579,7 +2579,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
__le16 action;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
@@ -2612,7 +2612,7 @@ struct mwl8k_cmd_mimo_config {
__le32 action;
__u8 rx_antenna_map;
__u8 tx_antenna_map;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
{
@@ -2652,7 +2652,7 @@ struct mwl8k_cmd_use_fixed_rate_sta {
__le32 rate_type;
__le32 reserved1;
__le32 reserved2;
-} __attribute__((packed));
+} __packed;
#define MWL8K_USE_AUTO_RATE 0x0002
#define MWL8K_UCAST_RATE 0
@@ -2694,7 +2694,7 @@ struct mwl8k_cmd_use_fixed_rate_ap {
u8 multicast_rate;
u8 multicast_rate_type;
u8 management_rate;
-} __attribute__((packed));
+} __packed;
static int
mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
@@ -2724,7 +2724,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
struct mwl8k_cmd_enable_sniffer {
struct mwl8k_cmd_pkt header;
__le32 action;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
@@ -2757,7 +2757,7 @@ struct mwl8k_cmd_set_mac_addr {
} mbss;
__u8 mac_addr[ETH_ALEN];
};
-} __attribute__((packed));
+} __packed;
#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
@@ -2812,7 +2812,7 @@ struct mwl8k_cmd_set_rate_adapt_mode {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 mode;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
{
@@ -2840,7 +2840,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
struct mwl8k_cmd_bss_start {
struct mwl8k_cmd_pkt header;
__le32 enable;
-} __attribute__((packed));
+} __packed;
static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int enable)
@@ -2885,7 +2885,7 @@ struct mwl8k_cmd_set_new_stn {
__u8 add_qos_info;
__u8 is_qos_sta;
__le32 fw_sta_ptr;
-} __attribute__((packed));
+} __packed;
#define MWL8K_STA_ACTION_ADD 0
#define MWL8K_STA_ACTION_REMOVE 2
@@ -2978,7 +2978,7 @@ struct ewc_ht_info {
__le16 control1;
__le16 control2;
__le16 control3;
-} __attribute__((packed));
+} __packed;
struct peer_capability_info {
/* Peer type - AP vs. STA. */
@@ -3007,7 +3007,7 @@ struct peer_capability_info {
__u8 pad2;
__u8 station_id;
__le16 amsdu_enabled;
-} __attribute__((packed));
+} __packed;
struct mwl8k_cmd_update_stadb {
struct mwl8k_cmd_pkt header;
@@ -3022,7 +3022,7 @@ struct mwl8k_cmd_update_stadb {
/* Peer info - valid during add/update. */
struct peer_capability_info peer_info;
-} __attribute__((packed));
+} __packed;
#define MWL8K_STA_DB_MODIFY_ENTRY 1
#define MWL8K_STA_DB_DEL_ENTRY 2
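[Editor's note] The pci_unmap_addr family used by mwl8k is replaced above by the bus-agnostic dma_unmap_addr helpers. Both spellings expand to a real dma_addr_t field only when the architecture needs to remember mapping state, and to nothing otherwise. A small hypothetical example of the new accessors:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	struct foo_rx_buf {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* maybe a dma_addr_t, maybe empty */
	};

	static void foo_save_mapping(struct foo_rx_buf *buf, dma_addr_t addr)
	{
		dma_unmap_addr_set(buf, mapping, addr);
	}

	static dma_addr_t foo_get_mapping(struct foo_rx_buf *buf)
	{
		return dma_unmap_addr(buf, mapping);
	}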
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 3e1947d097ca..259d75853984 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -49,7 +49,7 @@ struct orinoco_fw_header {
__le32 pri_offset; /* Offset to primary plug data */
__le32 compat_offset; /* Offset to compatibility data*/
char signature[0]; /* FW signature length headersize-20 */
-} __attribute__ ((packed));
+} __packed;
/* Check the range of various header entries. Return a pointer to a
* description of the problem, or NULL if everything checks out. */
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 9ca34e722b45..d9f18c11682a 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -205,7 +205,7 @@ struct hermes_tx_descriptor {
u8 retry_count;
u8 tx_rate;
__le16 tx_control;
-} __attribute__ ((packed));
+} __packed;
#define HERMES_TXSTAT_RETRYERR (0x0001)
#define HERMES_TXSTAT_AGEDERR (0x0002)
@@ -254,7 +254,7 @@ struct hermes_tallies_frame {
/* Those last are probably not available in very old firmwares */
__le16 RxDiscards_WEPICVError;
__le16 RxDiscards_WEPExcluded;
-} __attribute__ ((packed));
+} __packed;
/* Grabbed from wlan-ng - Thanks Mark... - Jean II
* This is the result of a scan inquiry command */
@@ -271,7 +271,7 @@ struct prism2_scan_apinfo {
u8 rates[10]; /* Bit rate supported */
__le16 proberesp_rate; /* Data rate of the response frame */
__le16 atim; /* ATIM window time, Kus (hostscan only) */
-} __attribute__ ((packed));
+} __packed;
/* Same stuff for the Lucent/Agere card.
* Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
@@ -285,7 +285,7 @@ struct agere_scan_apinfo {
/* bits: 0-ess, 1-ibss, 4-privacy [wep] */
__le16 essid_len; /* ESSID length */
u8 essid[32]; /* ESSID of the network */
-} __attribute__ ((packed));
+} __packed;
/* Moustafa: Scan structure for Symbol cards */
struct symbol_scan_apinfo {
@@ -303,7 +303,7 @@ struct symbol_scan_apinfo {
__le16 basic_rates; /* Basic rates bitmask */
u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
-} __attribute__ ((packed));
+} __packed;
union hermes_scan_info {
struct agere_scan_apinfo a;
@@ -343,7 +343,7 @@ struct agere_ext_scan_info {
__le16 beacon_interval;
__le16 capabilities;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
#define HERMES_LINKSTATUS_CONNECTED (0x0001)
@@ -355,7 +355,7 @@ struct agere_ext_scan_info {
struct hermes_linkstatus {
__le16 linkstatus; /* Link status */
-} __attribute__ ((packed));
+} __packed;
struct hermes_response {
u16 status, resp0, resp1, resp2;
@@ -365,11 +365,11 @@ struct hermes_response {
struct hermes_idstring {
__le16 len;
__le16 val[16];
-} __attribute__ ((packed));
+} __packed;
struct hermes_multicast {
u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index 6da85e75fce0..2b2b9a1a979c 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -65,10 +65,10 @@ struct dblock {
__le32 addr; /* adapter address where to write the block */
__le16 len; /* length of the data only, in bytes */
char data[0]; /* data to be written */
-} __attribute__ ((packed));
+} __packed;
/*
- * Plug Data References are located in in the image after the last data
+ * Plug Data References are located in the image after the last data
* block. They refer to areas in the adapter memory where the plug data
* items with matching ID should be written.
*/
@@ -77,7 +77,7 @@ struct pdr {
__le32 addr; /* adapter address where to write the data */
__le32 len; /* expected length of the data, in bytes */
char next[0]; /* next PDR starts here */
-} __attribute__ ((packed));
+} __packed;
/*
* Plug Data Items are located in the EEPROM read from the adapter by
@@ -88,7 +88,7 @@ struct pdi {
__le16 len; /* length of ID and data, in words */
__le16 id; /* record ID */
char data[0]; /* plug data */
-} __attribute__ ((packed));
+} __packed;
/*** FW data block access functions ***/
@@ -317,7 +317,7 @@ static const struct { \
__le16 len; \
__le16 id; \
u8 val[length]; \
-} __attribute__ ((packed)) default_pdr_data_##pid = { \
+} __packed default_pdr_data_##pid = { \
cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
sizeof(__le16)) - 1), \
cpu_to_le16(pid), \
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 6fbd78850123..077baa86756b 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -45,7 +45,7 @@ static const struct {
/* Firmware version encoding */
struct comp_id {
u16 id, variant, major, minor;
-} __attribute__ ((packed));
+} __packed;
static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
{
@@ -995,7 +995,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
u8 tx_mic[MIC_KEYLEN];
u8 rx_mic[MIC_KEYLEN];
u8 tsc[ORINOCO_SEQ_LEN];
- } __attribute__ ((packed)) buf;
+ } __packed buf;
hermes_t *hw = &priv->hw;
int ret;
int err;
@@ -1326,7 +1326,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv,
struct {
u8 addr[ETH_ALEN];
__le16 reason_code;
- } __attribute__ ((packed)) buf;
+ } __packed buf;
/* Currently only supported by WPA enabled Agere fw */
if (!priv->has_wpa)
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index ca71f08709bc..e8e2d0f4763d 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -172,7 +172,7 @@ struct hermes_txexc_data {
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
/* Rx frame header except compatibility 802.3 header */
struct hermes_rx_descriptor {
@@ -196,7 +196,7 @@ struct hermes_rx_descriptor {
/* Data length */
__le16 data_len;
-} __attribute__ ((packed));
+} __packed;
struct orinoco_rx_data {
struct hermes_rx_descriptor *desc;
@@ -390,7 +390,7 @@ int orinoco_process_xmit_skb(struct sk_buff *skb,
struct header_struct {
struct ethhdr eth; /* 802.3 header */
u8 encap[6]; /* 802.2 header */
- } __attribute__ ((packed)) hdr;
+ } __packed hdr;
int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
@@ -1170,7 +1170,7 @@ static void orinoco_join_ap(struct work_struct *work)
struct join_req {
u8 bssid[ETH_ALEN];
__le16 channel;
- } __attribute__ ((packed)) req;
+ } __packed req;
const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
struct prism2_scan_apinfo *atom = NULL;
int offset = 4;
@@ -1410,7 +1410,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
struct {
__le16 len;
__le16 type;
- } __attribute__ ((packed)) info;
+ } __packed info;
int len, type;
int err;
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index a6da86e0a70f..255710ef082a 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -32,7 +32,7 @@
struct orinoco_key {
__le16 len; /* always stored as little-endian */
char data[ORINOCO_MAX_KEY_SIZE];
-} __attribute__ ((packed));
+} __packed;
#define TKIP_KEYLEN 16
#define MIC_KEYLEN 8
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 78f089baa8c9..1558381998ee 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -90,7 +90,7 @@ struct header_struct {
/* SNAP */
u8 oui[3];
__be16 ethertype;
-} __attribute__ ((packed));
+} __packed;
struct ez_usb_fw {
u16 size;
@@ -222,7 +222,7 @@ struct ezusb_packet {
__le16 hermes_len;
__le16 hermes_rid;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/* Table of devices that work or may work with this driver */
static struct usb_device_id ezusb_table[] = {
@@ -356,12 +356,10 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
{
struct request_context *ctx;
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return NULL;
- memset(ctx, 0, sizeof(*ctx));
-
ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
if (!ctx->buf) {
kfree(ctx);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 5775124e2aee..cf7be1eb6124 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -128,7 +128,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
} else {
struct {
__le16 qual, signal, noise, unused;
- } __attribute__ ((packed)) cq;
+ } __packed cq;
err = HERMES_READ_RECORD(hw, USER_BAP,
HERMES_RID_COMMSQUALITY, &cq);
@@ -993,11 +993,9 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
return -EINVAL;
if (wrqu->data.length) {
- buf = kmalloc(wrqu->data.length, GFP_KERNEL);
+ buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
-
- memcpy(buf, extra, wrqu->data.length);
} else
buf = NULL;
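[Editor's note] Several hunks in this series (libertas if_usb.c, the p54 EEPROM parser, and the genie handler above) collapse an open-coded allocate-then-copy into kmemdup(). Functionally, kmemdup() behaves roughly like this sketch:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Approximation of kmemdup(), for illustration only. */
	static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
	{
		void *p = kmalloc(len, gfp);

		if (p)
			memcpy(p, src, len);
		return p;
	}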
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 187e263b045a..e51650ed49f2 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -599,13 +599,13 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
}
break;
case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
- priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
+ priv->iq_autocal = kmemdup(entry->data, data_len,
+ GFP_KERNEL);
if (!priv->iq_autocal) {
err = -ENOMEM;
goto err;
}
- memcpy(priv->iq_autocal, entry->data, data_len);
priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry);
break;
case PDR_DEFAULT_COUNTRY:
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index 4915d9d54203..e3ed893b5aaf 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -232,7 +232,7 @@ struct net2280_regs {
#define GPIO2_INTERRUPT 2
#define GPIO1_INTERRUPT 1
#define GPIO0_INTERRUPT 0
-} __attribute__ ((packed));
+} __packed;
/* usb control, BAR0 + 0x0080 */
struct net2280_usb_regs {
@@ -296,7 +296,7 @@ struct net2280_usb_regs {
#define FORCE_IMMEDIATE 7
#define OUR_USB_ADDRESS 0
__le32 ourconfig;
-} __attribute__ ((packed));
+} __packed;
/* pci control, BAR0 + 0x0100 */
struct net2280_pci_regs {
@@ -323,7 +323,7 @@ struct net2280_pci_regs {
#define PCI_ARBITER_CLEAR 2
#define PCI_EXTERNAL_ARBITER 1
#define PCI_HOST_MODE 0
-} __attribute__ ((packed));
+} __packed;
/* dma control, BAR0 + 0x0180 ... array of four structs like this,
* for channels 0..3. see also struct net2280_dma: descriptor
@@ -364,7 +364,7 @@ struct net2280_dma_regs { /* [11.7] */
__le32 dmaaddr;
__le32 dmadesc;
u32 _unused1;
-} __attribute__ ((packed));
+} __packed;
/* dedicated endpoint registers, BAR0 + 0x0200 */
@@ -374,7 +374,7 @@ struct net2280_dep_regs { /* [11.8] */
/* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
__le32 dep_rsp;
u32 _unused[2];
-} __attribute__ ((packed));
+} __packed;
/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
* like this, for ep0 then the configurable endpoints A..F
@@ -437,16 +437,16 @@ struct net2280_ep_regs { /* [11.9] */
__le32 ep_avail;
__le32 ep_data;
u32 _unused0[2];
-} __attribute__ ((packed));
+} __packed;
struct net2280_reg_write {
__le16 port;
__le32 addr;
__le32 val;
-} __attribute__ ((packed));
+} __packed;
struct net2280_reg_read {
__le16 port;
__le32 addr;
-} __attribute__ ((packed));
+} __packed;
#endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 2feead617a3b..ee9bc62a4fa2 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -65,7 +65,7 @@ struct p54p_csr {
u8 unused_6[1924];
u8 cardbus_cis[0x800];
u8 direct_mem_win[0x1000];
-} __attribute__ ((packed));
+} __packed;
/* usb backend only needs the register defines above */
#ifndef P54USB_H
@@ -74,7 +74,7 @@ struct p54p_desc {
__le32 device_addr;
__le16 len;
__le16 flags;
-} __attribute__ ((packed));
+} __packed;
struct p54p_ring_control {
__le32 host_idx[4];
@@ -83,7 +83,7 @@ struct p54p_ring_control {
struct p54p_desc tx_data[32];
struct p54p_desc rx_mgmt[4];
struct p54p_desc tx_mgmt[4];
-} __attribute__ ((packed));
+} __packed;
#define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r)
#define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r)
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index c8f09da1f84d..087bf0698a5a 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -697,9 +697,7 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
- /* use cx3110x name because board-n800.c uses that for the
- * SPI port */
- .name = "cx3110x",
+ .name = "p54spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
@@ -733,3 +731,4 @@ module_exit(p54spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
+MODULE_ALIAS("spi:p54spi");
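[Editor's note] Renaming the SPI driver from "cx3110x" to "p54spi" means board files must register the device under the new modalias; keeping MODULE_ALIAS("spi:cx3110x") preserves module autoloading for boards that still use the old name. A hypothetical board registration for the new name (bus number and clock are placeholder values):

	#include <linux/spi/spi.h>

	static struct spi_board_info foo_board_info[] __initdata = {
		{
			.modalias	= "p54spi",	/* must match the driver name */
			.bus_num	= 2,
			.chip_select	= 0,
			.max_speed_hz	= 48000000,
		},
	};

	/* called from the board's init code:
	 *	spi_register_board_info(foo_board_info, ARRAY_SIZE(foo_board_info));
	 */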
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h
index 7fbe8d8fc67c..dfaa62aaeb07 100644
--- a/drivers/net/wireless/p54/p54spi.h
+++ b/drivers/net/wireless/p54/p54spi.h
@@ -96,7 +96,7 @@ struct p54s_dma_regs {
__le16 cmd;
__le16 len;
__le32 addr;
-} __attribute__ ((packed));
+} __packed;
struct p54s_tx_info {
struct list_head tx_list;
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 73073259f508..ad595958b7df 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -69,7 +69,8 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
{USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/
{USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/
- {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */
+ /* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above,
+ * just noting it here for clarity */
{USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
{USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */
{USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
@@ -434,10 +435,9 @@ static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
u8 *buf;
int ret;
- buf = kmalloc(4, GFP_KERNEL);
+ buf = kmemdup(p54u_romboot_3887, 4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, p54u_romboot_3887, 4);
ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
buf, 4);
kfree(buf);
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index e935b79f7f75..ed4034ade59a 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -70,12 +70,12 @@ struct net2280_tx_hdr {
__le16 len;
__le16 follower; /* ? */
u8 padding[8];
-} __attribute__((packed));
+} __packed;
struct lm87_tx_hdr {
__le32 device_addr;
__le32 chksum;
-} __attribute__((packed));
+} __packed;
/* Some flags for the isl hardware registers controlling DMA inside the
* chip */
@@ -103,7 +103,7 @@ struct x2_header {
__le32 fw_load_addr;
__le32 fw_length;
__le32 crc;
-} __attribute__((packed));
+} __packed;
/* pipes 3 and 4 are not used by the driver */
#define P54U_PIPE_NUMBER 9
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 8d1190c0f062..912fdc022d08 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2101,7 +2101,7 @@ struct ieee80211_beacon_phdr {
u8 timestamp[8];
u16 beacon_int;
u16 capab_info;
-} __attribute__ ((packed));
+} __packed;
#define WLAN_EID_GENERIC 0xdd
static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
@@ -2751,14 +2751,9 @@ prism54_hostapd(struct net_device *ndev, struct iw_point *p)
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param))
+ return PTR_ERR(param);
switch (param->cmd) {
case PRISM2_SET_ENCRYPTION:
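[Editor's note] memdup_user() combines the allocation and the copy_from_user() and reports failure through the ERR_PTR convention, which is why the caller above switches from checking NULL/-EFAULT to IS_ERR()/PTR_ERR(). Roughly, it is equivalent to:

	#include <linux/slab.h>
	#include <linux/uaccess.h>
	#include <linux/err.h>

	/* Approximation of memdup_user(), for illustration only. */
	static void *memdup_user_sketch(const void __user *src, size_t len)
	{
		void *p = kmalloc(len, GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(p, src, len)) {
			kfree(p);
			return ERR_PTR(-EFAULT);
		}
		return p;
	}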
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index b7534c2869c8..59e31258d450 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -29,20 +29,20 @@
struct obj_ssid {
u8 length;
char octets[33];
-} __attribute__ ((packed));
+} __packed;
struct obj_key {
u8 type; /* dot11_priv_t */
u8 length;
char key[32];
-} __attribute__ ((packed));
+} __packed;
struct obj_mlme {
u8 address[6];
u16 id;
u16 state;
u16 code;
-} __attribute__ ((packed));
+} __packed;
struct obj_mlmeex {
u8 address[6];
@@ -51,12 +51,12 @@ struct obj_mlmeex {
u16 code;
u16 size;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct obj_buffer {
u32 size;
u32 addr; /* 32bit bus address */
-} __attribute__ ((packed));
+} __packed;
struct obj_bss {
u8 address[6];
@@ -77,17 +77,17 @@ struct obj_bss {
short rates;
short basic_rates;
int:16; /* padding */
-} __attribute__ ((packed));
+} __packed;
struct obj_bsslist {
u32 nr;
struct obj_bss bsslist[0];
-} __attribute__ ((packed));
+} __packed;
struct obj_frequencies {
u16 nr;
u16 mhz[0];
-} __attribute__ ((packed));
+} __packed;
struct obj_attachment {
char type;
@@ -95,7 +95,7 @@ struct obj_attachment {
short id;
short size;
char data[0];
-} __attribute__((packed));
+} __packed;
/*
* in case everything's ok, the inlined function below will be
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 54f9a4b7bf9b..6ca30a5b7bfb 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -34,13 +34,13 @@ struct rfmon_header {
__le16 unk3;
u8 rssi;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct rx_annex_header {
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
struct rfmon_header rfmon;
-} __attribute__ ((packed));
+} __packed;
/* wlan-ng (and hopefully others) AVS header, version one. Fields in
* network byte order. */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0b27e50fe0d5..0db93db9b675 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -101,7 +101,7 @@ typedef struct {
u8 device_id;
u8 flags;
u32 length;
-} __attribute__ ((packed))
+} __packed
pimfor_header_t;
/* A received and interrupt-processed management frame, either for
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 4bd61ee627c0..5e7f344b000d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -238,19 +238,19 @@ struct ndis_80211_auth_request {
u8 bssid[6];
u8 padding[2];
__le32 flags;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_pmkid_candidate {
u8 bssid[6];
u8 padding[2];
__le32 flags;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_pmkid_cand_list {
__le32 version;
__le32 num_candidates;
struct ndis_80211_pmkid_candidate candidate_list[0];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_status_indication {
__le32 status_type;
@@ -260,19 +260,19 @@ struct ndis_80211_status_indication {
struct ndis_80211_auth_request auth_request[0];
struct ndis_80211_pmkid_cand_list cand_list;
} u;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_ssid {
__le32 length;
u8 essid[NDIS_802_11_LENGTH_SSID];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_conf_freq_hop {
__le32 length;
__le32 hop_pattern;
__le32 hop_set;
__le32 dwell_time;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_conf {
__le32 length;
@@ -280,7 +280,7 @@ struct ndis_80211_conf {
__le32 atim_window;
__le32 ds_config;
struct ndis_80211_conf_freq_hop fh_config;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_bssid_ex {
__le32 length;
@@ -295,25 +295,25 @@ struct ndis_80211_bssid_ex {
u8 rates[NDIS_802_11_LENGTH_RATES_EX];
__le32 ie_length;
u8 ies[0];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_bssid_list_ex {
__le32 num_items;
struct ndis_80211_bssid_ex bssid[0];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_fixed_ies {
u8 timestamp[8];
__le16 beacon_interval;
__le16 capabilities;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_wep_key {
__le32 size;
__le32 index;
__le32 length;
u8 material[32];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_key {
__le32 size;
@@ -323,14 +323,14 @@ struct ndis_80211_key {
u8 padding[6];
u8 rsc[8];
u8 material[32];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_remove_key {
__le32 size;
__le32 index;
u8 bssid[6];
u8 padding[2];
-} __attribute__((packed));
+} __packed;
struct ndis_config_param {
__le32 name_offs;
@@ -338,7 +338,7 @@ struct ndis_config_param {
__le32 type;
__le32 value_offs;
__le32 value_length;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_assoc_info {
__le32 length;
@@ -358,12 +358,12 @@ struct ndis_80211_assoc_info {
} resp_ie;
__le32 resp_ie_length;
__le32 offset_resp_ies;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_auth_encr_pair {
__le32 auth_mode;
__le32 encr_mode;
-} __attribute__((packed));
+} __packed;
struct ndis_80211_capability {
__le32 length;
@@ -371,7 +371,7 @@ struct ndis_80211_capability {
__le32 num_pmkids;
__le32 num_auth_encr_pair;
struct ndis_80211_auth_encr_pair auth_encr_pair[0];
-} __attribute__((packed));
+} __packed;
struct ndis_80211_bssid_info {
u8 bssid[6];
@@ -2495,8 +2495,7 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_assoc_info *info;
- u8 assoc_buf[sizeof(*info) + IW_CUSTOM_MAX + 32];
+ struct ndis_80211_assoc_info *info = NULL;
u8 bssid[ETH_ALEN];
int resp_ie_len, req_ie_len;
u8 *req_ie, *resp_ie;
@@ -2515,23 +2514,43 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
resp_ie = NULL;
if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
- memset(assoc_buf, 0, sizeof(assoc_buf));
- info = (void *)assoc_buf;
+ info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
+ if (!info) {
+			/* No memory? Try to resume this work later. */
+ set_bit(WORK_LINK_UP, &priv->work_pending);
+ queue_work(priv->workqueue, &priv->work);
+ return;
+ }
- /* Get association info IEs from device and send them back to
- * userspace. */
- ret = get_association_info(usbdev, info, sizeof(assoc_buf));
+ /* Get association info IEs from device. */
+ ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
if (!ret) {
req_ie_len = le32_to_cpu(info->req_ie_length);
if (req_ie_len > 0) {
offset = le32_to_cpu(info->offset_req_ies);
+
+ if (offset > CONTROL_BUFFER_SIZE)
+ offset = CONTROL_BUFFER_SIZE;
+
req_ie = (u8 *)info + offset;
+
+ if (offset + req_ie_len > CONTROL_BUFFER_SIZE)
+ req_ie_len =
+ CONTROL_BUFFER_SIZE - offset;
}
resp_ie_len = le32_to_cpu(info->resp_ie_length);
if (resp_ie_len > 0) {
offset = le32_to_cpu(info->offset_resp_ies);
+
+ if (offset > CONTROL_BUFFER_SIZE)
+ offset = CONTROL_BUFFER_SIZE;
+
resp_ie = (u8 *)info + offset;
+
+ if (offset + resp_ie_len > CONTROL_BUFFER_SIZE)
+ resp_ie_len =
+ CONTROL_BUFFER_SIZE - offset;
}
}
} else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
@@ -2563,6 +2582,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+ if (info != NULL)
+ kfree(info);
+
priv->connected = true;
memcpy(priv->bssid, bssid, ETH_ALEN);
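The added checks above clamp the device-reported IE offsets and lengths so they can never point past the CONTROL_BUFFER_SIZE association buffer. A standalone sketch of the same clamping rule, mirroring the patch's checks and using illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Clamp an (offset, length) pair reported by the device so that the
 * described region always stays within a buffer of buf_size bytes. */
static void clamp_ie_region(size_t buf_size, uint32_t *offset, uint32_t *len)
{
	if (*offset > buf_size)
		*offset = buf_size;

	if (*offset + *len > buf_size)
		*len = buf_size - *offset;
}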
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index ad2c98af7e9d..1eb882e15fb4 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1076,9 +1076,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct queue_entry_priv_pci *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- u32 word;
u32 reg;
/*
@@ -1091,9 +1088,15 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ /*
+ * Write the TX descriptor for the beacon.
+ */
+ rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
* Enable beaconing again.
@@ -1226,7 +1229,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
}
txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00lib_txdone(entry, &txdesc);
+ rt2x00pci_txdone(entry, &txdesc);
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 41da3d218c65..a29cb212f89a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1233,9 +1233,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct queue_entry_priv_pci *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- u32 word;
u32 reg;
/*
@@ -1248,9 +1245,15 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
- rt2x00_desc_read(entry_priv->desc, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ /*
+ * Write the TX descriptor for the beacon.
+ */
+ rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
* Enable beaconing again.
@@ -1362,7 +1365,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
}
txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
- rt2x00lib_txdone(entry, &txdesc);
+ rt2x00pci_txdone(entry, &txdesc);
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9ae96a626e6d..002db646ae0b 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -345,7 +345,6 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
- int timeout;
u32 mask;
u16 reg;
@@ -367,18 +366,8 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
key->hw_key_idx += reg ? ffz(reg) : 0;
- /*
- * The encryption key doesn't fit within the CSR cache,
- * this means we should allocate it separately and use
- * rt2x00usb_vendor_request() to send the key to the hardware.
- */
- reg = KEY_ENTRY(key->hw_key_idx);
- timeout = REGISTER_TIMEOUT32(sizeof(crypto->key));
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT, reg,
- crypto->key,
- sizeof(crypto->key),
- timeout);
+ rt2500usb_register_multiwrite(rt2x00dev, reg,
+ crypto->key, sizeof(crypto->key));
/*
* The driver does not support the IV/EIV generation
@@ -1034,7 +1023,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
+ __le32 *txd = (__le32 *) skb->data;
u32 word;
/*
@@ -1080,6 +1069,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* Register descriptor details in skb frame descriptor.
*/
+ skbdesc->flags |= SKBDESC_DESC_IN_SKB;
skbdesc->desc = txd;
skbdesc->desc_len = TXD_DESC_SIZE;
}
@@ -1108,9 +1098,20 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
/*
- * Take the descriptor in front of the skb into account.
+ * Add space for the descriptor in front of the skb.
*/
skb_push(entry->skb, TXD_DESC_SIZE);
+ memset(entry->skb->data, 0, TXD_DESC_SIZE);
+
+ /*
+ * Write the TX descriptor for the beacon.
+ */
+ rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
* USB devices cannot blindly pass the skb->len as the
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 2aa03751c341..552f9f4c73d6 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -63,7 +63,6 @@
*/
#define REV_RT2860C 0x0100
#define REV_RT2860D 0x0101
-#define REV_RT2870D 0x0101
#define REV_RT2872E 0x0200
#define REV_RT3070E 0x0200
#define REV_RT3070F 0x0201
@@ -99,6 +98,21 @@
*/
/*
+ * E2PROM_CSR: PCI EEPROM control register.
+ * RELOAD: Write 1 to reload eeprom content.
+ * TYPE: 0: 93c46, 1:93c66.
+ * LOAD_STATUS: 1:loading, 0:done.
+ */
+#define E2PROM_CSR 0x0004
+#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
+#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
+#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
+#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
+#define E2PROM_CSR_TYPE FIELD32(0x00000030)
+#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
+#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
+
+/*
* OPT_14: Unknown register used by rt3xxx devices.
*/
#define OPT_14_CSR 0x0114
@@ -322,6 +336,39 @@
#define RX_DRX_IDX 0x029c
/*
+ * USB_DMA_CFG
+ * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
+ * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
+ * PHY_CLEAR: phy watch dog enable.
+ * TX_CLEAR: Clear USB DMA TX path.
+ * TXOP_HALT: Halt TXOP count down when TX buffer is full.
+ * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
+ * RX_BULK_EN: Enable USB DMA Rx.
+ * TX_BULK_EN: Enable USB DMA Tx.
+ * EP_OUT_VALID: OUT endpoint data valid.
+ * RX_BUSY: USB DMA RX FSM busy.
+ * TX_BUSY: USB DMA TX FSM busy.
+ */
+#define USB_DMA_CFG 0x02a0
+#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff)
+#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00)
+#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000)
+#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000)
+#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000)
+#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000)
+#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000)
+#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000)
+#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000)
+#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000)
+#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000)
+
+/*
+ * US_CYC_CNT
+ */
+#define US_CYC_CNT 0x02a4
+#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
+
+/*
* PBF_SYS_CTRL
* HOST_RAM_WRITE: enable Host program ram write selection
*/
@@ -1370,17 +1417,17 @@
struct mac_wcid_entry {
u8 mac[6];
u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;
struct hw_key_entry {
u8 key[16];
u8 tx_mic[8];
u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
struct mac_iveiv_entry {
u8 iv[8];
-} __attribute__ ((packed));
+} __packed;
/*
* MAC_WCID_ATTRIBUTE:
@@ -1389,6 +1436,10 @@ struct mac_iveiv_entry {
#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
+#define MAC_WCID_ATTRIBUTE_CIPHER_EXT FIELD32(0x00000400)
+#define MAC_WCID_ATTRIBUTE_BSS_IDX_EXT FIELD32(0x00000800)
+#define MAC_WCID_ATTRIBUTE_WAPI_MCBC FIELD32(0x00008000)
+#define MAC_WCID_ATTRIBUTE_WAPI_KEY_IDX FIELD32(0xff000000)
/*
* SHARED_KEY_MODE:
@@ -1510,7 +1561,9 @@ struct mac_iveiv_entry {
*/
/*
- * BBP 1: TX Antenna
+ * BBP 1: TX Antenna & Power
+ * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
*/
#define BBP1_TX_POWER FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index db4250d1c8b3..14c361ae87be 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,9 +1,9 @@
/*
+ Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
Based on the original rt2800pci.c and rt2800usb.c.
- Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -38,16 +38,8 @@
#include <linux/slab.h>
#include "rt2x00.h"
-#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
-#include "rt2x00usb.h"
-#endif
#include "rt2800lib.h"
#include "rt2800.h"
-#include "rt2800usb.h"
-
-MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
-MODULE_DESCRIPTION("rt2800 library");
-MODULE_LICENSE("GPL");
/*
* Register access.
@@ -282,9 +274,8 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
-void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
+void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
{
- __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
u32 word;
/*
@@ -380,6 +371,67 @@ void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
+void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ unsigned int beacon_base;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Add space for the TXWI in front of the skb.
+ */
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+	memset(entry->skb->data, 0, TXWI_DESC_SIZE);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->flags |= SKBDESC_DESC_IN_SKB;
+ skbdesc->desc = entry->skb->data;
+ skbdesc->desc_len = TXWI_DESC_SIZE;
+
+ /*
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi((__le32 *)entry->skb->data, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+
+ /*
+ * Write entire beacon with TXWI to register.
+ */
+ beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ rt2800_register_multiwrite(rt2x00dev, beacon_base,
+ entry->skb->data, entry->skb->len);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Clean up beacon skb.
+ */
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+}
+EXPORT_SYMBOL(rt2800_write_beacon);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -502,15 +554,28 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);
- rt2800_register_read(rt2x00dev, offset, &reg);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
- !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
- (crypto->cmd == SET_KEY) * crypto->cipher);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
- (crypto->cmd == SET_KEY) * crypto->bssidx);
- rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
- rt2800_register_write(rt2x00dev, offset, reg);
+ if (crypto->cmd == SET_KEY) {
+ rt2800_register_read(rt2x00dev, offset, &reg);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
+ !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+ /*
+	 * Both the cipher and the BSS Idx numbers are split into a main
+	 * value of 3 bits, and an extended field for adding one additional
+	 * bit to the value.
+ */
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
+ (crypto->cipher & 0x7));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT,
+ (crypto->cipher & 0x8) >> 3);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
+ (crypto->bssidx & 0x7));
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT,
+ (crypto->bssidx & 0x8) >> 3);
+ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
+ rt2800_register_write(rt2x00dev, offset, reg);
+ } else {
+ rt2800_register_write(rt2x00dev, offset, 0);
+ }
offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
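The hunk above explains that the cipher and BSS index values are written as a 3-bit main field plus a one-bit extension. A standalone sketch of that split, assuming a 4-bit input value and using illustrative names only:

#include <stdint.h>

struct wcid_attr_fields {
	uint8_t main3;	/* low 3 bits of the value */
	uint8_t ext1;	/* 4th bit, stored in the extension field */
};

/* Split a 4-bit cipher or BSS index value across the two fields. */
static struct wcid_attr_fields split_value(uint8_t value)
{
	struct wcid_attr_fields f = {
		.main3 = value & 0x7,
		.ext1  = (value & 0x8) >> 3,
	};

	return f;
}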
@@ -1023,7 +1088,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
u8 r1;
rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
rt2800_bbp_write(rt2x00dev, 1, r1);
rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
@@ -1212,6 +1277,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
u32 reg;
u16 eeprom;
unsigned int i;
+ int ret;
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
@@ -1221,59 +1287,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
- if (rt2x00_is_usb(rt2x00dev)) {
- /*
- * Wait until BBP and RF are ready.
- */
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- if (reg && reg != ~0)
- break;
- msleep(1);
- }
-
- if (i == REGISTER_BUSY_COUNT) {
- ERROR(rt2x00dev, "Unstable hardware.\n");
- return -EBUSY;
- }
-
- rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
- reg & ~0x00002000);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
- /*
- * Reset DMA indexes
- */
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
- }
-
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
- rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
- rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
- USB_MODE_RESET, REGISTER_TIMEOUT);
-#endif
- }
-
- rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+ ret = rt2800_drv_init_registers(rt2x00dev);
+ if (ret)
+ return ret;
rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
@@ -1328,7 +1344,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
}
- rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
} else if (rt2x00_rt(rt2x00dev, RT3070)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
@@ -1339,6 +1354,10 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
}
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1560,9 +1579,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
if (rt2x00_is_usb(rt2x00dev)) {
- rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
- rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
- rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
+ rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -1706,8 +1725,7 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
- rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
@@ -2013,8 +2031,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@ -2147,7 +2164,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
- rt2x00_rt(rt2x00dev, RT2870) ||
rt2x00_rt(rt2x00dev, RT2872)) {
/*
* There is a max of 2 RX streams for RT28x0 series
@@ -2251,7 +2267,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
if (!rt2x00_rt(rt2x00dev, RT2860) &&
- !rt2x00_rt(rt2x00dev, RT2870) &&
!rt2x00_rt(rt2x00dev, RT2872) &&
!rt2x00_rt(rt2x00dev, RT2883) &&
!rt2x00_rt(rt2x00dev, RT3070) &&
@@ -2491,6 +2506,18 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
+ /*
+ * As rt2800 has a global fallback table we cannot specify
+	 * more than one tx rate per frame, but since the hw will
+ * try several rates (based on the fallback table) we should
+ * still initialize max_rates to the maximum number of rates
+ * we are going to try. Otherwise mac80211 will truncate our
+	 * reported tx rates and the rc algorithm will end up with
+ * incorrect data.
+ */
+ rt2x00dev->hw->max_rates = 7;
+ rt2x00dev->hw->max_rate_tries = 1;
+
rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
/*
@@ -2528,16 +2555,16 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
else
spec->ht.ht_supported = false;
- /*
- * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
- * reception problems with HT40 capable 11n APs
- */
spec->ht.cap =
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_TX_STBC |
IEEE80211_HT_CAP_RX_STBC;
+
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2)
+ spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
+
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@@ -2743,3 +2770,8 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.rfkill_poll = rt2x00mac_rfkill_poll,
};
EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
+
+MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink RT2800 library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 94de999e2290..8313dbf441a5 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -40,6 +40,8 @@ struct rt2800_ops {
int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
const struct rt2x00_field32 field, u32 *reg);
+
+ int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
};
static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
@@ -107,13 +109,22 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
}
+static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
+
+ return rt2800ops->drv_init_registers(rt2x00dev);
+}
+
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
-void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
+void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc);
void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *txdesc);
+void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b2f23272c3aa..e5ea670a18db 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -51,7 +51,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -446,6 +446,38 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
+static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+
+ /*
+ * Reset DMA indexes
+ */
+ rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ return 0;
+}
+
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -465,7 +497,7 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Send signal to firmware during boot time.
*/
- rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
/*
* Enable RX.
@@ -613,18 +645,10 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
-static int rt2800pci_write_tx_data(struct queue_entry* entry,
- struct txentry_desc *txdesc)
+static void rt2800pci_write_tx_datadesc(struct queue_entry* entry,
+ struct txentry_desc *txdesc)
{
- int ret;
-
- ret = rt2x00pci_write_tx_data(entry, txdesc);
- if (ret)
- return ret;
-
- rt2800_write_txwi(entry->skb, txdesc);
-
- return 0;
+ rt2800_write_txwi((__le32 *) entry->skb->data, txdesc);
}
@@ -684,49 +708,6 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2800pci_write_beacon(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- unsigned int beacon_base;
- u32 reg;
-
- /*
- * Disable beaconing while we are reloading the beacon data,
- * otherwise we might be sending out invalid data.
- */
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Add the TXWI for the beacon to the skb.
- */
- rt2800_write_txwi(entry->skb, txdesc);
- skb_push(entry->skb, TXWI_DESC_SIZE);
-
- /*
- * Write entire beacon with TXWI to register.
- */
- beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2800_register_multiwrite(rt2x00dev, beacon_base,
- entry->skb->data, entry->skb->len);
-
- /*
- * Enable beaconing again.
- */
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Clean up beacon skb.
- */
- dev_kfree_skb_any(entry->skb);
- entry->skb = NULL;
-}
-
static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
@@ -832,29 +813,24 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
- u32 old_reg;
int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
+ int i;
/*
- * During each loop we will compare the freshly read
- * TX_STA_FIFO register value with the value read from
- * the previous loop. If the 2 values are equal then
- * we should stop processing because the chance it
- * quite big that the device has been unplugged and
- * we risk going into an endless loop.
+ * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+ * at most X times and also stop processing once the TX_STA_FIFO_VALID
+ * flag is not set anymore.
+ *
+ * The legacy drivers use X=TX_RING_SIZE but state in a comment
+ * that the TX_STA_FIFO stack has a size of 16. We stick to our
+ * tx ring size for now.
*/
- old_reg = 0;
-
- while (1) {
+ for (i = 0; i < TX_ENTRIES; i++) {
rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
break;
- if (old_reg == reg)
- break;
- old_reg = reg;
-
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -880,8 +856,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
/* Check if we got a match by looking at WCID/ACK/PID
* fields */
- txwi = (__le32 *)(entry->skb->data -
- rt2x00dev->ops->extra_tx_headroom);
+ txwi = (__le32 *) entry->skb->data;
rt2x00_desc_read(txwi, 1, &word);
tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
@@ -923,10 +898,14 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
txdesc.retry = 7;
}
- __set_bit(TXDONE_FALLBACK, &txdesc.flags);
-
+ /*
+ * the frame was retried at least once
+ * -> hw used fallback rates
+ */
+ if (txdesc.retry)
+ __set_bit(TXDONE_FALLBACK, &txdesc.flags);
- rt2x00lib_txdone(entry, &txdesc);
+ rt2x00pci_txdone(entry, &txdesc);
}
}
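The rewritten txdone loop above bounds the TX_STA_FIFO drain by both the VALID flag and the TX ring size, so an unplugged device can no longer keep the loop spinning. A standalone, runnable sketch of the pattern with a simulated FIFO (all names and sizes are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_ENTRIES 64	/* assumed ring size for the sketch */

static int fifo_remaining = 5;	/* pretend five status entries are pending */

/* Stands in for reading TX_STA_FIFO; returns false once the VALID bit
 * would be clear, i.e. the FIFO is empty. */
static bool pop_status(uint32_t *status)
{
	if (fifo_remaining <= 0)
		return false;
	*status = (uint32_t)fifo_remaining--;
	return true;
}

int main(void)
{
	uint32_t status;
	int i;

	/* Bounded drain: stop on an empty FIFO, and never read more
	 * entries than the TX ring can hold. */
	for (i = 0; i < TX_ENTRIES; i++) {
		if (!pop_status(&status))
			break;
		printf("processed status %u\n", status);
	}
	return 0;
}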
@@ -996,6 +975,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.register_multiwrite = rt2x00pci_register_multiwrite,
.regbusy_read = rt2x00pci_regbusy_read,
+
+ .drv_init_registers = rt2800pci_init_registers,
};
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1063,8 +1044,9 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800pci_write_tx_desc,
- .write_tx_data = rt2800pci_write_tx_data,
- .write_beacon = rt2800pci_write_beacon,
+ .write_tx_data = rt2x00pci_write_tx_data,
+ .write_tx_datadesc = rt2800pci_write_tx_datadesc,
+ .write_beacon = rt2800_write_beacon,
.kick_tx_queue = rt2800pci_kick_tx_queue,
.kill_tx_queue = rt2800pci_kill_tx_queue,
.fill_rxdone = rt2800pci_fill_rxdone,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index afc8e7da27cb..5a8dda9b5b5a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -35,25 +35,6 @@
#define RT2800PCI_H
/*
- * PCI registers.
- */
-
-/*
- * E2PROM_CSR: EEPROM control register.
- * RELOAD: Write 1 to reload eeprom content.
- * TYPE: 0: 93c46, 1:93c66.
- * LOAD_STATUS: 1:loading, 0:done.
- */
-#define E2PROM_CSR 0x0004
-#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
-#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
-#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
-#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
-#define E2PROM_CSR_TYPE FIELD32(0x00000030)
-#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
-#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
-
-/*
* Queue register offset macros
*/
#define TX_QUEUE_REG_OFFSET 0x10
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0f8b84b7224c..f18c12a19cc9 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -169,11 +169,8 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Write firmware to device.
*/
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT,
- FIRMWARE_IMAGE_BASE,
- data + offset, length,
- REGISTER_TIMEOUT32(length));
+ rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+ data + offset, length);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -196,7 +193,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Send signal to firmware during boot time.
*/
- rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
@@ -246,6 +243,44 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
+static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i;
+
+ /*
+ * Wait until BBP and RF are ready.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+ if (reg && reg != ~0)
+ break;
+ msleep(1);
+ }
+
+ if (i == REGISTER_BUSY_COUNT) {
+ ERROR(rt2x00dev, "Unstable hardware.\n");
+ return -EBUSY;
+ }
+
+ rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
+
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+ rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
+
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+ USB_MODE_RESET, REGISTER_TIMEOUT);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+ return 0;
+}
+
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -400,20 +435,21 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE);
+ __le32 *txi = (__le32 *) skb->data;
+ __le32 *txwi = (__le32 *) (skb->data + TXINFO_DESC_SIZE);
u32 word;
/*
* Initialize TXWI descriptor
*/
- rt2800_write_txwi(skb, txdesc);
+ rt2800_write_txwi(txwi, txdesc);
/*
* Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- skb->len + TXWI_DESC_SIZE);
+ skb->len - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -426,6 +462,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* Register descriptor details in skb frame descriptor.
*/
+ skbdesc->flags |= SKBDESC_DESC_IN_SKB;
skbdesc->desc = txi;
skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
@@ -433,51 +470,6 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
-static void rt2800usb_write_beacon(struct queue_entry *entry,
- struct txentry_desc *txdesc)
-{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- unsigned int beacon_base;
- u32 reg;
-
- /*
- * Disable beaconing while we are reloading the beacon data,
- * otherwise we might be sending out invalid data.
- */
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Add the TXWI for the beacon to the skb.
- */
- rt2800_write_txwi(entry->skb, txdesc);
- skb_push(entry->skb, TXWI_DESC_SIZE);
-
- /*
- * Write entire beacon with descriptor to register.
- */
- beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT, beacon_base,
- entry->skb->data, entry->skb->len,
- REGISTER_TIMEOUT32(entry->skb->len));
-
- /*
- * Enable beaconing again.
- */
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Clean up the beacon skb.
- */
- dev_kfree_skb(entry->skb);
- entry->skb = NULL;
-}
-
static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
{
int length;
@@ -595,6 +587,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
.register_multiwrite = rt2x00usb_register_multiwrite,
.regbusy_read = rt2x00usb_regbusy_read,
+
+ .drv_init_registers = rt2800usb_init_registers,
};
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -659,7 +653,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800usb_write_tx_desc,
.write_tx_data = rt2x00usb_write_tx_data,
- .write_beacon = rt2800usb_write_beacon,
+ .write_beacon = rt2800_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
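With SKBDESC_DESC_IN_SKB, rt2800usb_write_tx_desc() above lays the frame out as TXINFO, then TXWI, then the 802.11 frame, all inside the skb, so the USB DMA packet length programmed into TXINFO_W0 is the skb length minus only the TXINFO header. A small runnable sketch of that arithmetic (the descriptor sizes are assumed for illustration; the driver takes them from its own headers):

#include <stdio.h>

#define TXINFO_DESC_SIZE 4	/* assumed size for the sketch */
#define TXWI_DESC_SIZE 16	/* assumed size for the sketch */

int main(void)
{
	unsigned int frame_len = 256;	/* 802.11 frame payload */
	unsigned int skb_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE + frame_len;

	/* skb->data layout: [TXINFO][TXWI][802.11 frame] */
	unsigned int usb_dma_tx_pkt_len = skb_len - TXINFO_DESC_SIZE;

	printf("skb len %u, USB DMA TX packet length %u\n",
	       skb_len, usb_dma_tx_pkt_len);
	return 0;
}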
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 2bca6a71a7f5..0722badccf86 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -32,43 +32,6 @@
#define RT2800USB_H
/*
- * USB registers.
- */
-
-/*
- * USB_DMA_CFG
- * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
- * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
- * PHY_CLEAR: phy watch dog enable.
- * TX_CLEAR: Clear USB DMA TX path.
- * TXOP_HALT: Halt TXOP count down when TX buffer is full.
- * RX_BULK_AGG_EN: Enable Rx Bulk Aggregation.
- * RX_BULK_EN: Enable USB DMA Rx.
- * TX_BULK_EN: Enable USB DMA Tx.
- * EP_OUT_VALID: OUT endpoint data valid.
- * RX_BUSY: USB DMA RX FSM busy.
- * TX_BUSY: USB DMA TX FSM busy.
- */
-#define USB_DMA_CFG 0x02a0
-#define USB_DMA_CFG_RX_BULK_AGG_TIMEOUT FIELD32(0x000000ff)
-#define USB_DMA_CFG_RX_BULK_AGG_LIMIT FIELD32(0x0000ff00)
-#define USB_DMA_CFG_PHY_CLEAR FIELD32(0x00010000)
-#define USB_DMA_CFG_TX_CLEAR FIELD32(0x00080000)
-#define USB_DMA_CFG_TXOP_HALT FIELD32(0x00100000)
-#define USB_DMA_CFG_RX_BULK_AGG_EN FIELD32(0x00200000)
-#define USB_DMA_CFG_RX_BULK_EN FIELD32(0x00400000)
-#define USB_DMA_CFG_TX_BULK_EN FIELD32(0x00800000)
-#define USB_DMA_CFG_EP_OUT_VALID FIELD32(0x3f000000)
-#define USB_DMA_CFG_RX_BUSY FIELD32(0x40000000)
-#define USB_DMA_CFG_TX_BUSY FIELD32(0x80000000)
-
-/*
- * USB_CYC_CFG
- */
-#define USB_CYC_CFG 0x02a4
-#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff)
-
-/*
* 8051 firmware image.
*/
#define FIRMWARE_RT2870 "rt2870.bin"
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 6c1ff4c15c84..e7acc6abfd89 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -39,6 +39,7 @@
#include <net/mac80211.h>
#include "rt2x00debug.h"
+#include "rt2x00dump.h"
#include "rt2x00leds.h"
#include "rt2x00reg.h"
#include "rt2x00queue.h"
@@ -159,6 +160,7 @@ struct avg_val {
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
+ RT2X00_CHIP_INTF_PCIE,
RT2X00_CHIP_INTF_USB,
RT2X00_CHIP_INTF_SOC,
};
@@ -175,8 +177,7 @@ struct rt2x00_chip {
#define RT2570 0x2570
#define RT2661 0x2661
#define RT2573 0x2573
-#define RT2860 0x2860 /* 2.4GHz PCI/CB */
-#define RT2870 0x2870
+#define RT2860 0x2860 /* 2.4GHz */
#define RT2872 0x2872 /* WSOC */
#define RT2883 0x2883 /* WSOC */
#define RT3070 0x3070
@@ -551,6 +552,8 @@ struct rt2x00lib_ops {
struct txentry_desc *txdesc);
int (*write_tx_data) (struct queue_entry *entry,
struct txentry_desc *txdesc);
+ void (*write_tx_datadesc) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
@@ -978,7 +981,13 @@ static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI) ||
+ rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
+}
+
+static inline bool rt2x00_is_pcie(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
}
static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
@@ -999,6 +1008,13 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
/**
+ * rt2x00queue_unmap_skb - Unmap a skb from DMA.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @skb: The skb to unmap.
+ */
+void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
+
+/**
* rt2x00queue_get_queue - Convert queue index to queue pointer
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @queue: rt2x00 queue index (see &enum data_queue_qid).
@@ -1015,6 +1031,26 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index);
/*
+ * Debugfs handlers.
+ */
+/**
+ * rt2x00debug_dump_frame - Dump a frame to userspace through debugfs.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @type: The type of frame that is being dumped.
+ * @skb: The skb containing the frame to be dumped.
+ */
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_dump_type type, struct sk_buff *skb);
+#else
+static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
+ enum rt2x00_dump_type type,
+ struct sk_buff *skb)
+{
+}
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+/*
* Interrupt context handlers.
*/
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 098315a271ca..8dbd634dae27 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -170,23 +170,27 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
unsigned int ieee80211_flags)
{
struct rt2x00lib_conf libconf;
+ u16 hw_value;
memset(&libconf, 0, sizeof(libconf));
libconf.conf = conf;
if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
- if (conf_is_ht40(conf))
+ if (conf_is_ht40(conf)) {
__set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
- else
+ hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
+ } else {
__clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
+ hw_value = conf->channel->hw_value;
+ }
memcpy(&libconf.rf,
- &rt2x00dev->spec.channels[conf->channel->hw_value],
+ &rt2x00dev->spec.channels[hw_value],
sizeof(libconf.rf));
memcpy(&libconf.channel,
- &rt2x00dev->spec.channels_info[conf->channel->hw_value],
+ &rt2x00dev->spec.channels_info[hw_value],
sizeof(libconf.channel));
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index e9fe93fd8042..b0498e7e7aae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -211,6 +211,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
skb_queue_purge(&intf->frame_dump_skbqueue);
}
+EXPORT_SYMBOL_GPL(rt2x00debug_dump_frame);
static int rt2x00debug_file_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 3ae468c4d760..339cc84bf4fb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -211,11 +211,6 @@ void rt2x00lib_txdone(struct queue_entry *entry,
bool success;
/*
- * Unmap the skb.
- */
- rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
-
- /*
* Remove L2 padding which was added during
*/
if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -224,7 +219,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* If the IV/EIV data was stripped from the frame before it was
* passed to the hardware, we should now reinsert it again because
- * mac80211 will expect the the same data to be present it the
+	 * mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
@@ -241,8 +236,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
*/
success =
test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
- test_bit(TXDONE_UNKNOWN, &txdesc->flags) ||
- test_bit(TXDONE_FALLBACK, &txdesc->flags);
+ test_bit(TXDONE_UNKNOWN, &txdesc->flags);
/*
* Update TX statistics.
@@ -264,11 +258,22 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* Frame was send with retries, hardware tried
* different rates to send out the frame, at each
- * retry it lowered the rate 1 step.
+ * retry it lowered the rate 1 step except when the
+ * lowest rate was used.
*/
for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
tx_info->status.rates[i].idx = rate_idx - i;
tx_info->status.rates[i].flags = rate_flags;
+
+ if (rate_idx - i == 0) {
+ /*
+ * The lowest rate (index 0) was used until the
+ * number of max retries was reached.
+				 * maximum number of retries was reached.
+ tx_info->status.rates[i].count = retry_rates - i;
+ i++;
+ break;
+ }
tx_info->status.rates[i].count = 1;
}
if (i < (IEEE80211_TX_MAX_RATES - 1))
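The retry handling above fills mac80211's rate table one entry per transmission attempt, stepping the rate index down by one each time, and charges all remaining attempts to index 0 once the lowest rate is reached. A standalone sketch of that fill with a worked example (fixed-size table, illustrative names):

#include <stdio.h>

#define TX_MAX_RATES 5	/* assumed table size for the sketch */

struct rate { int idx; int count; };

/* Fill the rate table for a frame that started at rate_idx and needed
 * retry_rates transmission attempts, mirroring the fallback accounting. */
static int fill_rates(struct rate *rates, int rate_idx, int retry_rates)
{
	int i;

	for (i = 0; i < retry_rates && i < TX_MAX_RATES; i++) {
		rates[i].idx = rate_idx - i;

		if (rate_idx - i == 0) {
			/* Lowest rate: it absorbs every remaining attempt. */
			rates[i].count = retry_rates - i;
			return i + 1;
		}
		rates[i].count = 1;
	}
	return i;
}

int main(void)
{
	struct rate rates[TX_MAX_RATES];
	int used = fill_rates(rates, 2, 6), i;	/* idx 2, six attempts */

	/* Prints (2,1), (1,1), (0,4): six attempts in total. */
	for (i = 0; i < used; i++)
		printf("rate[%d]: idx=%d count=%d\n",
		       i, rates[i].idx, rates[i].count);
	return 0;
}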
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index ed303b423e41..6df2e0b746b8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -20,7 +20,12 @@
/*
Module: rt2x00dump
- Abstract: Data structures for the rt2x00debug & userspace.
+ Abstract:
+ Data structures for the rt2x00debug & userspace.
+
+ The declarations in this file can be used by both rt2x00
+ and userspace and therefore should be kept together in
+ this file.
*/
#ifndef RT2X00DUMP_H
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 5a407602ce3e..c004cd3a8847 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -44,11 +44,22 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
txdesc->mpdu_density = 0;
txdesc->ba_size = 7; /* FIXME: What value is needed? */
- txdesc->stbc = 0; /* FIXME: What value is needed? */
- txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
- if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- txdesc->mcs |= 0x08;
+ txdesc->stbc =
+ (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
+
+ /*
+	 * If IEEE80211_TX_RC_MCS is set, txrate->idx already contains the
+	 * MCS rate to be used.
+ */
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ txdesc->mcs = txrate->idx;
+ } else {
+ txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ txdesc->mcs |= 0x08;
+ }
+
/*
* Convert flags
@@ -84,3 +95,31 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
else
txdesc->txop = TXOP_HTTXOP;
}
+
+u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf)
+{
+ struct hw_mode_spec *spec = &rt2x00dev->spec;
+ int center_channel;
+ u16 i;
+
+ /*
+ * Initialize center channel to current channel.
+ */
+ center_channel = spec->channels[conf->channel->hw_value].channel;
+
+ /*
+ * Adjust center channel to HT40+ and HT40- operation.
+ */
+ if (conf_is_ht40_plus(conf))
+ center_channel += 2;
+ else if (conf_is_ht40_minus(conf))
+ center_channel -= (center_channel == 14) ? 1 : 2;
+
+ for (i = 0; i < spec->num_channels; i++)
+ if (spec->channels[i].channel == center_channel)
+ return i;
+
+ WARN_ON(1);
+ return conf->channel->hw_value;
+}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index be2e37fb4071..ed27de1de57b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -27,8 +27,6 @@
#ifndef RT2X00LIB_H
#define RT2X00LIB_H
-#include "rt2x00dump.h"
-
/*
* Interval defines
*/
@@ -107,13 +105,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
struct queue_entry *entry);
/**
- * rt2x00queue_unmap_skb - Unmap a skb from DMA.
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- * @skb: The skb to unmap.
- */
-void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
-
-/**
* rt2x00queue_free_skb - free a skb
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @skb: The skb to free.
@@ -296,8 +287,6 @@ static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev)
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
-void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
- enum rt2x00_dump_type type, struct sk_buff *skb);
void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
struct rxdone_entry_desc *rxdesc);
#else
@@ -309,12 +298,6 @@ static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
{
}
-static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
- enum rt2x00_dump_type type,
- struct sk_buff *skb)
-{
-}
-
static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
struct rxdone_entry_desc *rxdesc)
{
@@ -384,12 +367,21 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate);
+
+u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf);
#else
static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate)
{
}
+
+static inline u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf)
+{
+ return conf->channel->hw_value;
+}
#endif /* CONFIG_RT2X00_LIB_HT */
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index f71eee67f977..10eaffd12b1b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -81,6 +81,24 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry,
return -EINVAL;
}
+ /*
+ * Add the requested extra tx headroom in front of the skb.
+ */
+ skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+ memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Call the driver's write_tx_datadesc function, if it exists.
+ */
+ if (rt2x00dev->ops->lib->write_tx_datadesc)
+ rt2x00dev->ops->lib->write_tx_datadesc(entry, txdesc);
+
+ /*
+ * Map the skb to DMA.
+ */
+ if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00queue_map_txskb(rt2x00dev, entry->skb);
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
@@ -88,6 +106,34 @@ EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
/*
* TX/RX data handlers.
*/
+void rt2x00pci_txdone(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+
+ /*
+ * Unmap the skb.
+ */
+ rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
+
+ /*
+ * Remove the extra tx headroom from the skb.
+ */
+ skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Signal that the TX descriptor is no longer in the skb.
+ */
+ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
+
+ /*
+ * Pass on to rt2x00lib.
+ */
+ rt2x00lib_txdone(entry, txdesc);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
+
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue = rt2x00dev->rx;
@@ -305,7 +351,10 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
rt2x00dev->irq = pci_dev->irq;
rt2x00dev->name = pci_name(pci_dev);
- rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
+ if (pci_dev->is_pcie)
+ rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
+ else
+ rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
retval = rt2x00pci_alloc_reg(rt2x00dev);
if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 51bcef3839ce..00528b8a754d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -109,6 +109,14 @@ struct queue_entry_priv_pci {
};
/**
+ * rt2x00pci_txdone - Handle TX done events.
+ * @entry: The queue entry for which a TX done event was received.
+ * @txdesc: The TX done descriptor for the entry.
+ */
+void rt2x00pci_txdone(struct queue_entry *entry,
+ struct txdone_entry_desc *txdesc);
+
+/**
* rt2x00pci_rxdone - Handle RX done events
* @rt2x00dev: Device pointer, see &struct rt2x00_dev.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 20dbdd6fb904..f91637147116 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -100,21 +100,8 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- /*
- * If device has requested headroom, we should make sure that
- * is also mapped to the DMA so it can be used for transfering
- * additional descriptor information to the hardware.
- */
- skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
-
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
-
- /*
- * Restore data pointer to original location again.
- */
- skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
-
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -130,16 +117,12 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
}
if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
- /*
- * Add headroom to the skb length, it has been removed
- * by the driver, but it was actually mapped to DMA.
- */
- dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->ops->extra_tx_headroom,
+ dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
}
+EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
@@ -370,13 +353,18 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Check if more fragments are pending
*/
- if (ieee80211_has_morefrags(hdr->frame_control) ||
- (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
+ if (ieee80211_has_morefrags(hdr->frame_control)) {
__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
}
/*
+ * Check if more frames (!= fragments) are pending
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
+ __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
+
+ /*
* Beacons and probe responses require the tsf timestamp
* to be inserted into the frame, except for a frame that has been injected
* through a monitor interface. This latter is needed for testing a
@@ -421,7 +409,6 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
{
struct data_queue *queue = entry->queue;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- enum rt2x00_dump_type dump_type;
rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
@@ -429,9 +416,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
- dump_type = (txdesc->queue == QID_BEACON) ?
- DUMP_FRAME_BEACON : DUMP_FRAME_TX;
- rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@@ -537,9 +522,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
return -EIO;
}
- if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
- rt2x00queue_map_txskb(queue->rt2x00dev, skb);
-
set_bit(ENTRY_DATA_PENDING, &entry->flags);
rt2x00queue_index_inc(queue, Q_INDEX);
@@ -595,11 +577,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
skbdesc->entry = intf->beacon;
/*
- * Write TX descriptor into reserved room in front of the beacon.
- */
- rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
-
- /*
* Send beacon to hardware and enable beacon genaration..
*/
rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index f79170849add..bd54f55a8cb9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -213,9 +213,16 @@ struct rxdone_entry_desc {
/**
* enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
*
+ * Every txdone report has to contain the basic result of the
+ * transmission, either &TXDONE_UNKNOWN, &TXDONE_SUCCESS or
+ * &TXDONE_FAILURE. The flag &TXDONE_FALLBACK can be used in
+ * conjunction with all of these flags but should only be set
+ * if retries > 0. The flag &TXDONE_EXCESSIVE_RETRY can only be used
+ * in conjunction with &TXDONE_FAILURE.
+ *
* @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
* @TXDONE_SUCCESS: Frame was successfully send
- * @TXDONE_FALLBACK: Frame was successfully send using a fallback rate.
+ * @TXDONE_FALLBACK: Hardware used fallback rates for retries
* @TXDONE_FAILURE: Frame was not successfully send
* @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
* frame transmission failed due to excessive retries.
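The paragraph added above defines a reporting contract for drivers. As a hedged illustration only (not code from this patch), a driver whose hardware gave up on a frame after three fallback retries would report it roughly like this:

	struct txdone_entry_desc txdesc;

	txdesc.flags = 0;
	txdesc.retry = 3;                                  /* hw retried three times */
	__set_bit(TXDONE_FAILURE, &txdesc.flags);          /* the mandatory basic result */
	__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);  /* only valid together with FAILURE */
	__set_bit(TXDONE_FALLBACK, &txdesc.flags);         /* allowed because retry > 0 */

	rt2x00lib_txdone(entry, &txdesc);

The rt61pci_txdone() changes later in this patch follow this pattern, setting TXDONE_FALLBACK whenever the retry count read from STA_CSR4 is non-zero.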
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index bd1546ba7ad2..b45bc24c3dae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -113,26 +113,6 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
const u16 offset, void *buffer,
const u16 buffer_length, const int timeout)
{
- int status;
-
- mutex_lock(&rt2x00dev->csr_mutex);
-
- status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
- requesttype, offset, buffer,
- buffer_length, timeout);
-
- mutex_unlock(&rt2x00dev->csr_mutex);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
-
-int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
- const u8 request, const u8 requesttype,
- const u16 offset, const void *buffer,
- const u16 buffer_length,
- const int timeout)
-{
int status = 0;
unsigned char *tb;
u16 off, len, bsize;
@@ -157,7 +137,7 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
return status;
}
-EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
+EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
const unsigned int offset,
@@ -198,6 +178,11 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
return;
/*
+ * Remove the descriptor from the front of the skb.
+ */
+ skb_pull(entry->skb, entry->queue->desc_size);
+
+ /*
* Obtain the status about this packet.
* Note that when the status is 0 it does not mean the
* frame was send out correctly. It only means the frame
@@ -243,10 +228,10 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry,
rt2x00usb_interrupt_txdone, entry);
/*
- * Make sure the skb->data pointer points to the frame, not the
- * descriptor.
+ * Call the driver's write_tx_datadesc function, if it exists.
*/
- skb_pull(entry->skb, entry->queue->desc_size);
+ if (rt2x00dev->ops->lib->write_tx_datadesc)
+ rt2x00dev->ops->lib->write_tx_datadesc(entry, txdesc);
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 621d0f829251..255b81ef9530 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -167,25 +167,6 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
const u16 buffer_length, const int timeout);
/**
- * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered)
- * @rt2x00dev: Pointer to &struct rt2x00_dev
- * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
- * @requesttype: Request type &USB_VENDOR_REQUEST_*
- * @offset: Register start offset to perform action on
- * @buffer: Buffer where information will be read/written to by device
- * @buffer_length: Size of &buffer
- * @timeout: Operation timeout
- *
- * This function is used to transfer register data in blocks larger
- * then CSR_CACHE_SIZE. Use for firmware upload, keys and beacons.
- */
-int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
- const u8 request, const u8 requesttype,
- const u16 offset, const void *buffer,
- const u16 buffer_length,
- const int timeout);
-
-/**
* rt2x00usb_vendor_request_sw - Send single register command to device
* @rt2x00dev: Pointer to &struct rt2x00_dev
* @request: USB vendor command (See &enum rt2x00usb_vendor_request)
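With rt2x00usb_vendor_request_large_buff() removed, rt2x00usb_vendor_request_buff() itself now performs the blockwise transfer under csr_mutex. The hunks above only show the seams of that merge; a rough sketch of what the consolidated helper does, assuming the same CSR_CACHE_SIZE chunking the old large-buff variant used (the real function body in rt2x00usb.c may differ in detail):

	int status = 0;
	unsigned char *tb = buffer;
	u16 off = offset, len = buffer_length;

	mutex_lock(&rt2x00dev->csr_mutex);

	while (len && !status) {
		u16 bsize = min_t(u16, CSR_CACHE_SIZE, len);

		status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
							requesttype, off, tb,
							bsize, timeout);

		tb += bsize;
		off += bsize;
		len -= bsize;
	}

	mutex_unlock(&rt2x00dev->csr_mutex);

	return status;

The rt73usb callers that used the large-buff variant for keys, beacons and firmware upload are switched to rt2x00usb_register_multiwrite() in the hunks that follow.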
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 6a74baf4e934..7ca383478eeb 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -931,6 +931,9 @@ static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1874,6 +1877,16 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
+ * Write the TX descriptor for the beacon.
+ */
+ rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -2039,29 +2052,24 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
- u32 old_reg;
int type;
int index;
+ int i;
/*
- * During each loop we will compare the freshly read
- * STA_CSR4 register value with the value read from
- * the previous loop. If the 2 values are equal then
- * we should stop processing because the chance is
- * quite big that the device has been unplugged and
- * we risk going into an endless loop.
+ * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
+ * at most X times and also stop processing once the TX_STA_FIFO_VALID
+ * flag is not set anymore.
+ *
+ * The legacy drivers use X=TX_RING_SIZE but state in a comment
+ * that the TX_STA_FIFO stack has a size of 16. We stick to our
+ * tx ring size for now.
*/
- old_reg = 0;
-
- while (1) {
+ for (i = 0; i < TX_ENTRIES; i++) {
rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
break;
- if (old_reg == reg)
- break;
- old_reg = reg;
-
/*
* Skip this entry when it contains an invalid
* queue identication number.
@@ -2100,7 +2108,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
txdesc.retry = 0;
- rt2x00lib_txdone(entry_done, &txdesc);
+ rt2x00pci_txdone(entry_done, &txdesc);
entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
}
@@ -2120,7 +2128,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
}
txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
- rt2x00lib_txdone(entry, &txdesc);
+ /*
+ * The frame was retried at least once,
+ * hence the hw used fallback rates.
+ */
+ if (txdesc.retry)
+ __set_bit(TXDONE_FALLBACK, &txdesc.flags);
+
+ rt2x00pci_txdone(entry, &txdesc);
}
}
@@ -2577,6 +2592,18 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * As rt61 has a global fallback table we cannot specify
+ * more than one tx rate per frame but since the hw will
+ * try several rates (based on the fallback table) we should
+ * still initialize max_rates to the maximum number of rates
+ * we are going to try. Otherwise mac80211 will truncate our
+ * reported tx rates and the rc algorithm will end up with
+ * incorrect data.
+ */
+ rt2x00dev->hw->max_rates = 7;
+ rt2x00dev->hw->max_rate_tries = 1;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index df80f1af22a4..e2e728ab0b2e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -153,13 +153,13 @@ struct hw_key_entry {
u8 key[16];
u8 tx_mic[8];
u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
struct hw_pairwise_ta_entry {
u8 address[6];
u8 cipher;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* Other on-chip shared memory space.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6e0d82efe924..d06d90f003e7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -270,7 +270,6 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
{
struct hw_key_entry key_entry;
struct rt2x00_field32 field;
- int timeout;
u32 mask;
u32 reg;
@@ -306,12 +305,8 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
sizeof(key_entry.rx_mic));
reg = SHARED_KEY_ENTRY(key->hw_key_idx);
- timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT, reg,
- &key_entry,
- sizeof(key_entry),
- timeout);
+ rt2x00usb_register_multiwrite(rt2x00dev, reg,
+ &key_entry, sizeof(key_entry));
/*
* The cipher types are stored over 2 registers.
@@ -372,7 +367,6 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
{
struct hw_pairwise_ta_entry addr_entry;
struct hw_key_entry key_entry;
- int timeout;
u32 mask;
u32 reg;
@@ -407,17 +401,11 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
sizeof(key_entry.rx_mic));
reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
- timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT, reg,
- &key_entry,
- sizeof(key_entry),
- timeout);
+ rt2x00usb_register_multiwrite(rt2x00dev, reg,
+ &key_entry, sizeof(key_entry));
/*
* Send the address and cipher type to the hardware register.
- * This data fits within the CSR cache size, so we can use
- * rt2x00usb_register_multiwrite() directly.
*/
memset(&addr_entry, 0, sizeof(addr_entry));
memcpy(&addr_entry, crypto->address, ETH_ALEN);
@@ -828,6 +816,9 @@ static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
u32 reg;
rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0);
rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
libconf->conf->long_frame_max_tx_count);
rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
@@ -1092,11 +1083,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Write firmware to device.
*/
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT,
- FIRMWARE_IMAGE_BASE,
- data, len,
- REGISTER_TIMEOUT32(len));
+ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len);
/*
* Send firmware request to device to load firmware,
@@ -1442,7 +1429,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
+ __le32 *txd = (__le32 *) skb->data;
u32 word;
/*
@@ -1505,6 +1492,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* Register descriptor details in skb frame descriptor.
*/
+ skbdesc->flags |= SKBDESC_DESC_IN_SKB;
skbdesc->desc = txd;
skbdesc->desc_len = TXD_DESC_SIZE;
}
@@ -1528,18 +1516,27 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
- * Take the descriptor in front of the skb into account.
+ * Add space for the descriptor in front of the skb.
*/
skb_push(entry->skb, TXD_DESC_SIZE);
+ memset(entry->skb->data, 0, TXD_DESC_SIZE);
+
+ /*
+ * Write the TX descriptor for the beacon.
+ */
+ rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
+
+ /*
+ * Dump beacon to userspace through debugfs.
+ */
+ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
- USB_VENDOR_REQUEST_OUT, beacon_base,
- entry->skb->data, entry->skb->len,
- REGISTER_TIMEOUT32(entry->skb->len));
+ rt2x00usb_register_multiwrite(rt2x00dev, beacon_base,
+ entry->skb->data, entry->skb->len);
/*
* Enable beaconing again.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7abe7eb14555..44d5b2bebd39 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -108,13 +108,13 @@ struct hw_key_entry {
u8 key[16];
u8 tx_mic[8];
u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;
struct hw_pairwise_ta_entry {
u8 address[6];
u8 cipher;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
/*
* Since NULL frame won't be that long (256 byte),
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 4baf0cf0826f..30523314da43 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -36,7 +36,7 @@ struct rtl8180_tx_desc {
u8 agc;
u8 flags2;
u32 reserved[2];
-} __attribute__ ((packed));
+} __packed;
struct rtl8180_rx_desc {
__le32 flags;
@@ -45,7 +45,7 @@ struct rtl8180_rx_desc {
__le32 rx_buf;
__le64 tsft;
};
-} __attribute__ ((packed));
+} __packed;
struct rtl8180_tx_ring {
struct rtl8180_tx_desc *desc;
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6bb32112e65c..98878160a65a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -47,7 +47,7 @@ struct rtl8187_rx_hdr {
u8 agc;
u8 reserved;
__le64 mac_time;
-} __attribute__((packed));
+} __packed;
struct rtl8187b_rx_hdr {
__le32 flags;
@@ -59,7 +59,7 @@ struct rtl8187b_rx_hdr {
__le16 snr_long2end;
s8 pwdb_g12;
u8 fot;
-} __attribute__((packed));
+} __packed;
/* {rtl8187,rtl8187b}_tx_info is in skb */
@@ -68,7 +68,7 @@ struct rtl8187_tx_hdr {
__le16 rts_duration;
__le16 len;
__le32 retry;
-} __attribute__((packed));
+} __packed;
struct rtl8187b_tx_hdr {
__le32 flags;
@@ -80,7 +80,7 @@ struct rtl8187b_tx_hdr {
__le32 unused_3;
__le32 retry;
__le32 unused_4[2];
-} __attribute__((packed));
+} __packed;
enum {
DEVICE_RTL8187,
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 8522490d2e29..978519d1ff4c 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -185,7 +185,7 @@ struct rtl818x_csr {
u8 reserved_22[4];
__le16 TALLY_CNT;
u8 TALLY_SEL;
-} __attribute__((packed));
+} __packed;
struct rtl818x_rf_ops {
char *name;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 337fc7bec5a5..2f98058be451 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -41,7 +41,7 @@ config WL1251_SDIO
config WL1271
tristate "TI wl1271 support"
- depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS
+ depends on WL12XX && GENERIC_HARDIRQS
depends on INET
select FW_LOADER
select CRC7
@@ -65,7 +65,7 @@ config WL1271_SPI
config WL1271_SDIO
tristate "TI wl1271 SDIO support"
- depends on WL1271 && MMC && ARM
+ depends on WL1271 && MMC
---help---
This module adds support for the SDIO interface of adapters using
TI wl1271 chipset. Select this if your platform is using
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 26160c45784c..842df310d92a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -60,7 +60,7 @@ struct acx_error_counter {
/* the number of missed sequence numbers in the squentially */
/* values of frames seq numbers */
u32 seq_num_miss;
-} __attribute__ ((packed));
+} __packed;
struct acx_revision {
struct acx_header header;
@@ -89,7 +89,7 @@ struct acx_revision {
* bits 24 - 31: Chip ID - The WiLink chip ID.
*/
u32 hw_version;
-} __attribute__ ((packed));
+} __packed;
enum wl1251_psm_mode {
/* Active mode */
@@ -111,7 +111,7 @@ struct acx_sleep_auth {
/* 2 - ELP mode: Deep / Max sleep*/
u8 sleep_auth;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum {
HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -159,7 +159,7 @@ struct acx_data_path_params {
* complete ring until an interrupt is generated.
*/
u32 tx_complete_timeout;
-} __attribute__ ((packed));
+} __packed;
struct acx_data_path_params_resp {
@@ -180,7 +180,7 @@ struct acx_data_path_params_resp {
u32 tx_control_addr;
u32 tx_complete_addr;
-} __attribute__ ((packed));
+} __packed;
#define TX_MSDU_LIFETIME_MIN 0
#define TX_MSDU_LIFETIME_MAX 3000
@@ -197,7 +197,7 @@ struct acx_rx_msdu_lifetime {
* firmware discards the MSDU.
*/
u32 lifetime;
-} __attribute__ ((packed));
+} __packed;
/*
* RX Config Options Table
@@ -285,7 +285,7 @@ struct acx_rx_config {
u32 config_options;
u32 filter_options;
-} __attribute__ ((packed));
+} __packed;
enum {
QOS_AC_BE = 0,
@@ -325,13 +325,13 @@ struct acx_tx_queue_qos_config {
/* Lowest memory blocks guaranteed for this queue */
u16 low_threshold;
-} __attribute__ ((packed));
+} __packed;
struct acx_packet_detection {
struct acx_header header;
u32 threshold;
-} __attribute__ ((packed));
+} __packed;
enum acx_slot_type {
@@ -349,7 +349,7 @@ struct acx_slot {
u8 wone_index; /* Reserved */
u8 slot_time;
u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
#define ADDRESS_GROUP_MAX (8)
@@ -362,7 +362,7 @@ struct acx_dot11_grp_addr_tbl {
u8 num_groups;
u8 pad[2];
u8 mac_table[ADDRESS_GROUP_MAX_LEN];
-} __attribute__ ((packed));
+} __packed;
#define RX_TIMEOUT_PS_POLL_MIN 0
@@ -388,7 +388,7 @@ struct acx_rx_timeout {
* from an UPSD enabled queue.
*/
u16 upsd_timeout;
-} __attribute__ ((packed));
+} __packed;
#define RTS_THRESHOLD_MIN 0
#define RTS_THRESHOLD_MAX 4096
@@ -399,7 +399,7 @@ struct acx_rts_threshold {
u16 threshold;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_beacon_filter_option {
struct acx_header header;
@@ -415,7 +415,7 @@ struct acx_beacon_filter_option {
*/
u8 max_num_beacons;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
/*
* ACXBeaconFilterEntry (not 221)
@@ -461,7 +461,7 @@ struct acx_beacon_filter_ie_table {
u8 num_ie;
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */
@@ -494,7 +494,7 @@ struct acx_bt_wlan_coex {
*/
u8 enable;
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
#define PTA_ANTENNA_TYPE_DEF (0)
#define PTA_BT_HP_MAXTIME_DEF (2000)
@@ -648,7 +648,7 @@ struct acx_bt_wlan_coex_param {
/* range: 0 - 20 default: 1 */
u8 bt_hp_respected_num;
-} __attribute__ ((packed));
+} __packed;
#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
@@ -660,7 +660,7 @@ struct acx_energy_detection {
u16 rx_cca_threshold;
u8 tx_energy_detection;
u8 pad;
-} __attribute__ ((packed));
+} __packed;
#define BCN_RX_TIMEOUT_DEF_VALUE 10000
#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
@@ -679,14 +679,14 @@ struct acx_beacon_broadcast {
/* Consecutive PS Poll failures before updating the host */
u8 ps_poll_threshold;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_event_mask {
struct acx_header header;
u32 event_mask;
u32 high_event_mask; /* Unused */
-} __attribute__ ((packed));
+} __packed;
#define CFG_RX_FCS BIT(2)
#define CFG_RX_ALL_GOOD BIT(3)
@@ -729,7 +729,7 @@ struct acx_fw_gen_frame_rates {
u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */
u8 tx_mgt_frame_rate;
u8 tx_mgt_frame_mod;
-} __attribute__ ((packed));
+} __packed;
/* STA MAC */
struct acx_dot11_station_id {
@@ -737,28 +737,28 @@ struct acx_dot11_station_id {
u8 mac[ETH_ALEN];
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_feature_config {
struct acx_header header;
u32 options;
u32 data_flow_options;
-} __attribute__ ((packed));
+} __packed;
struct acx_current_tx_power {
struct acx_header header;
u8 current_tx_power;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_dot11_default_key {
struct acx_header header;
u8 id;
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_tsf_info {
struct acx_header header;
@@ -769,7 +769,7 @@ struct acx_tsf_info {
u32 last_TBTT_lsb;
u8 last_dtim_count;
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
enum acx_wake_up_event {
WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
@@ -785,7 +785,7 @@ struct acx_wake_up_condition {
u8 wake_up_event; /* Only one bit can be set */
u8 listen_interval;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_aid {
struct acx_header header;
@@ -795,7 +795,7 @@ struct acx_aid {
*/
u16 aid;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
enum acx_preamble_type {
ACX_PREAMBLE_LONG = 0,
@@ -811,7 +811,7 @@ struct acx_preamble {
*/
u8 preamble;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum acx_ctsprotect_type {
CTSPROTECT_DISABLE = 0,
@@ -822,11 +822,11 @@ struct acx_ctsprotect {
struct acx_header header;
u8 ctsprotect;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_tx_statistics {
u32 internal_desc_overflow;
-} __attribute__ ((packed));
+} __packed;
struct acx_rx_statistics {
u32 out_of_mem;
@@ -837,14 +837,14 @@ struct acx_rx_statistics {
u32 xfr_hint_trig;
u32 path_reset;
u32 reset_counter;
-} __attribute__ ((packed));
+} __packed;
struct acx_dma_statistics {
u32 rx_requested;
u32 rx_errors;
u32 tx_requested;
u32 tx_errors;
-} __attribute__ ((packed));
+} __packed;
struct acx_isr_statistics {
/* host command complete */
@@ -903,7 +903,7 @@ struct acx_isr_statistics {
/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
u32 low_rssi;
-} __attribute__ ((packed));
+} __packed;
struct acx_wep_statistics {
/* WEP address keys configured */
@@ -925,7 +925,7 @@ struct acx_wep_statistics {
/* WEP decrypt interrupts */
u32 interrupt;
-} __attribute__ ((packed));
+} __packed;
#define ACX_MISSED_BEACONS_SPREAD 10
@@ -985,12 +985,12 @@ struct acx_pwr_statistics {
/* the number of beacons in awake mode */
u32 rcvd_awake_beacons;
-} __attribute__ ((packed));
+} __packed;
struct acx_mic_statistics {
u32 rx_pkts;
u32 calc_failure;
-} __attribute__ ((packed));
+} __packed;
struct acx_aes_statistics {
u32 encrypt_fail;
@@ -999,7 +999,7 @@ struct acx_aes_statistics {
u32 decrypt_packets;
u32 encrypt_interrupt;
u32 decrypt_interrupt;
-} __attribute__ ((packed));
+} __packed;
struct acx_event_statistics {
u32 heart_beat;
@@ -1010,7 +1010,7 @@ struct acx_event_statistics {
u32 oom_late;
u32 phy_transmit_error;
u32 tx_stuck;
-} __attribute__ ((packed));
+} __packed;
struct acx_ps_statistics {
u32 pspoll_timeouts;
@@ -1020,7 +1020,7 @@ struct acx_ps_statistics {
u32 pspoll_max_apturn;
u32 pspoll_utilization;
u32 upsd_utilization;
-} __attribute__ ((packed));
+} __packed;
struct acx_rxpipe_statistics {
u32 rx_prep_beacon_drop;
@@ -1028,7 +1028,7 @@ struct acx_rxpipe_statistics {
u32 beacon_buffer_thres_host_int_trig_rx_data;
u32 missed_beacon_host_int_trig_rx_data;
u32 tx_xfr_host_int_trig_rx_data;
-} __attribute__ ((packed));
+} __packed;
struct acx_statistics {
struct acx_header header;
@@ -1044,7 +1044,7 @@ struct acx_statistics {
struct acx_event_statistics event;
struct acx_ps_statistics ps;
struct acx_rxpipe_statistics rxpipe;
-} __attribute__ ((packed));
+} __packed;
#define ACX_MAX_RATE_CLASSES 8
#define ACX_RATE_MASK_UNSPECIFIED 0
@@ -1063,7 +1063,7 @@ struct acx_rate_policy {
u32 rate_class_cnt;
struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
-} __attribute__ ((packed));
+} __packed;
struct wl1251_acx_memory {
__le16 num_stations; /* number of STAs to be supported. */
@@ -1082,7 +1082,7 @@ struct wl1251_acx_memory {
u8 tx_min_mem_block_num;
u8 num_ssid_profiles;
__le16 debug_buffer_size;
-} __attribute__ ((packed));
+} __packed;
#define ACX_RX_DESC_MIN 1
@@ -1094,7 +1094,7 @@ struct wl1251_acx_rx_queue_config {
u8 type;
u8 priority;
__le32 dma_address;
-} __attribute__ ((packed));
+} __packed;
#define ACX_TX_DESC_MIN 1
#define ACX_TX_DESC_MAX 127
@@ -1103,7 +1103,7 @@ struct wl1251_acx_tx_queue_config {
u8 num_descs;
u8 pad[2];
u8 attributes;
-} __attribute__ ((packed));
+} __packed;
#define MAX_TX_QUEUE_CONFIGS 5
#define MAX_TX_QUEUES 4
@@ -1113,7 +1113,7 @@ struct wl1251_acx_config_memory {
struct wl1251_acx_memory mem_config;
struct wl1251_acx_rx_queue_config rx_queue_config;
struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS];
-} __attribute__ ((packed));
+} __packed;
struct wl1251_acx_mem_map {
struct acx_header header;
@@ -1147,7 +1147,7 @@ struct wl1251_acx_mem_map {
/* Number of blocks FW allocated for RX packets */
u32 num_rx_mem_blocks;
-} __attribute__ ((packed));
+} __packed;
struct wl1251_acx_wr_tbtt_and_dtim {
@@ -1164,7 +1164,7 @@ struct wl1251_acx_wr_tbtt_and_dtim {
*/
u8 dtim;
u8 padding;
-} __attribute__ ((packed));
+} __packed;
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1194,7 +1194,7 @@ struct wl1251_acx_ac_cfg {
/* The TX Op Limit (in microseconds) for the access class. */
u16 txop_limit;
-} __attribute__ ((packed));
+} __packed;
enum wl1251_acx_channel_type {
@@ -1245,7 +1245,7 @@ struct wl1251_acx_tid_cfg {
/* not supported */
u32 apsdconf[2];
-} __attribute__ ((packed));
+} __packed;
/*************************************************************************
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index 4ad67cae94d2..7e70dd5a21b8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -106,7 +106,7 @@ struct wl1251_cmd_header {
u16 status;
/* payload */
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct wl1251_command {
struct wl1251_cmd_header header;
@@ -201,7 +201,7 @@ struct wl1251_scan_parameters {
u8 ssid_len;
u8 ssid[32];
-} __attribute__ ((packed));
+} __packed;
struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
@@ -218,7 +218,7 @@ struct wl1251_scan_ch_parameters {
u8 tx_power_att;
u8 channel;
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
@@ -228,7 +228,7 @@ struct wl1251_cmd_scan {
struct wl1251_scan_parameters params;
struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
enum {
BSS_TYPE_IBSS = 0,
@@ -276,14 +276,14 @@ struct cmd_join {
u8 tx_mgt_frame_rate; /* OBSOLETE */
u8 tx_mgt_frame_mod; /* OBSOLETE */
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct cmd_enabledisable_path {
struct wl1251_cmd_header header;
u8 channel;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
#define WL1251_MAX_TEMPLATE_SIZE 300
@@ -292,7 +292,7 @@ struct wl1251_cmd_packet_template {
__le16 size;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
#define TIM_ELE_ID 5
#define PARTIAL_VBM_MAX 251
@@ -304,7 +304,7 @@ struct wl1251_tim {
u8 dtim_period;
u8 bitmap_ctrl;
u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
-} __attribute__ ((packed));
+} __packed;
/* Virtual Bit Map update */
struct wl1251_cmd_vbm_update {
@@ -312,7 +312,7 @@ struct wl1251_cmd_vbm_update {
__le16 len;
u8 padding[2];
struct wl1251_tim tim;
-} __attribute__ ((packed));
+} __packed;
enum wl1251_cmd_ps_mode {
STATION_ACTIVE_MODE,
@@ -333,7 +333,7 @@ struct wl1251_cmd_ps_params {
u8 hang_over_period;
u16 null_data_rate;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct wl1251_cmd_trigger_scan_to {
struct wl1251_cmd_header header;
@@ -411,7 +411,7 @@ struct wl1251_cmd_set_keys {
u8 key[MAX_KEY_SIZE];
u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
-} __attribute__ ((packed));
+} __packed;
#endif /* __WL1251_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index be0ac54d6246..f48a2b66bc5a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -82,7 +82,7 @@ struct event_debug_report {
u32 report_1;
u32 report_2;
u32 report_3;
-} __attribute__ ((packed));
+} __packed;
struct event_mailbox {
u32 events_vector;
@@ -112,7 +112,7 @@ struct event_mailbox {
struct event_debug_report report;
u8 average_snr_level;
u8 padding[19];
-} __attribute__ ((packed));
+} __packed;
int wl1251_event_unmask(struct wl1251 *wl);
void wl1251_event_mbox_config(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 00b24282fc73..c8f268951e10 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -124,7 +124,7 @@ static int wl1251_fetch_nvs(struct wl1251 *wl)
}
wl->nvs_len = fw->size;
- wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
+ wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL);
if (!wl->nvs) {
wl1251_error("could not allocate memory for the nvs file");
@@ -132,8 +132,6 @@ static int wl1251_fetch_nvs(struct wl1251 *wl)
goto out;
}
- memcpy(wl->nvs, fw->data, wl->nvs_len);
-
ret = 0;
out:
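The wl1251_fetch_nvs() change above is a straight substitution: kmemdup() allocates and copies in one call. For reference, the two forms behave the same (sketch, not patch code):

	/* before: allocate, then copy */
	wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
	if (wl->nvs)
		memcpy(wl->nvs, fw->data, wl->nvs_len);

	/* after: one call, still returns NULL on allocation failure */
	wl->nvs = kmemdup(fw->data, wl->nvs_len, GFP_KERNEL);

so the existing NULL check and error path in the function need no further change.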
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index 563a3fde40fb..da4e53406a0e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -117,7 +117,7 @@ struct wl1251_rx_descriptor {
s8 rssi; /* in dB */
u8 rcpi; /* in dB */
u8 snr; /* in dB */
-} __attribute__ ((packed));
+} __packed;
void wl1251_rx(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index c561332e7009..b901b6135654 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -37,11 +37,17 @@
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#endif
+struct wl1251_sdio {
+ struct sdio_func *func;
+ u32 elp_val;
+};
+
static struct wl12xx_platform_data *wl12xx_board_data;
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
- return wl->if_priv;
+ struct wl1251_sdio *wl_sdio = wl->if_priv;
+ return wl_sdio->func;
}
static void wl1251_sdio_interrupt(struct sdio_func *func)
@@ -90,10 +96,17 @@ static void wl1251_sdio_write(struct wl1251 *wl, int addr,
static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
{
int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
+ struct wl1251_sdio *wl_sdio = wl->if_priv;
+ struct sdio_func *func = wl_sdio->func;
+
+ /*
+ * The hardware only supports RAW (read after write) access for
+ * reading; regular sdio_readb won't work here (it interprets
+ * the unused bits of CMD52 as write data even if we send a
+ * read request).
+ */
sdio_claim_host(func);
- *val = sdio_readb(func, addr, &ret);
+ *val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret);
sdio_release_host(func);
if (ret)
@@ -103,7 +116,8 @@ static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
{
int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl1251_sdio *wl_sdio = wl->if_priv;
+ struct sdio_func *func = wl_sdio->func;
sdio_claim_host(func);
sdio_writeb(func, val, addr, &ret);
@@ -111,6 +125,8 @@ static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
if (ret)
wl1251_error("sdio_writeb failed (%d)", ret);
+ else
+ wl_sdio->elp_val = val;
}
static void wl1251_sdio_reset(struct wl1251 *wl)
@@ -197,6 +213,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
int ret;
struct wl1251 *wl;
struct ieee80211_hw *hw;
+ struct wl1251_sdio *wl_sdio;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -204,6 +221,12 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl = hw->priv;
+ wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL);
+ if (wl_sdio == NULL) {
+ ret = -ENOMEM;
+ goto out_free_hw;
+ }
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret)
@@ -213,7 +236,8 @@ static int wl1251_sdio_probe(struct sdio_func *func,
sdio_release_host(func);
SET_IEEE80211_DEV(hw, &func->dev);
- wl->if_priv = func;
+ wl_sdio->func = func;
+ wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
wl->set_power = wl1251_sdio_set_power;
@@ -259,6 +283,8 @@ disable:
sdio_disable_func(func);
release:
sdio_release_host(func);
+ kfree(wl_sdio);
+out_free_hw:
wl1251_free_hw(wl);
return ret;
}
@@ -266,9 +292,11 @@ release:
static void __devexit wl1251_sdio_remove(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
+ struct wl1251_sdio *wl_sdio = wl->if_priv;
if (wl->irq)
free_irq(wl->irq, wl);
+ kfree(wl_sdio);
wl1251_free_hw(wl);
sdio_claim_host(func);
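The wl1251_sdio changes wrap the raw sdio_func pointer in a small private struct so that the last value written to the ELP register can be cached and replayed. The reason is given in the comment above: ELP reads must use CMD52 read-after-write, so every read has to supply a write byte. A compressed sketch of the resulting pattern, using only names from these hunks (sdio_claim_host()/sdio_release_host() around each access omitted for brevity):

	struct wl1251_sdio *wl_sdio = wl->if_priv;
	int ret = 0;

	/* write path: perform the write and remember the value */
	sdio_writeb(wl_sdio->func, val, addr, &ret);
	if (!ret)
		wl_sdio->elp_val = val;

	/* read path: RAW access, re-writing the cached value */
	*val = sdio_writeb_readb(wl_sdio->func, wl_sdio->elp_val, addr, &ret);

The probe/remove hunks allocate and free the wl1251_sdio container behind wl->if_priv accordingly.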
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 55856c6bb97a..65c4be8c2e80 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -109,7 +109,7 @@ struct tx_control {
unsigned xfer_pad:1;
unsigned reserved:7;
-} __attribute__ ((packed));
+} __packed;
struct tx_double_buffer_desc {
@@ -156,7 +156,7 @@ struct tx_double_buffer_desc {
u8 num_mem_blocks;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
enum {
TX_SUCCESS = 0,
@@ -208,7 +208,7 @@ struct tx_result {
/* See done_1 */
u8 done_2;
-} __attribute__ ((packed));
+} __packed;
static inline int wl1251_tx_get_queue(int queue)
{
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 6f1b6b5640c0..ec09f0d40ca2 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -33,6 +33,7 @@
#include <net/mac80211.h>
#include "wl1271_conf.h"
+#include "wl1271_ini.h"
#define DRIVER_NAME "wl1271"
#define DRIVER_PREFIX DRIVER_NAME ": "
@@ -116,33 +117,6 @@ enum {
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
-/* NVS data structure */
-#define WL1271_NVS_SECTION_SIZE 468
-
-#define WL1271_NVS_GENERAL_PARAMS_SIZE 57
-#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
- (WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
-#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17
-#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
- (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
-#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65
-#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
- (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
-#define WL1271_NVS_FEM_COUNT 2
-#define WL1271_NVS_INI_SPARE_SIZE 124
-
-struct wl1271_nvs_file {
- /* NVS section */
- u8 nvs[WL1271_NVS_SECTION_SIZE];
-
- /* INI section */
- u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
- u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
- u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
- [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
- u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
-} __attribute__ ((packed));
-
/*
* Enable/disable 802.11a support for WL1273
*/
@@ -317,7 +291,7 @@ struct wl1271_fw_status {
__le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
__le32 padding[2];
-} __attribute__ ((packed));
+} __packed;
struct wl1271_rx_mem_pool_addr {
u32 addr;
@@ -325,6 +299,7 @@ struct wl1271_rx_mem_pool_addr {
};
struct wl1271_scan {
+ struct cfg80211_scan_request *req;
u8 state;
u8 ssid[IW_ESSID_MAX_SIZE+1];
size_t ssid_len;
@@ -375,6 +350,7 @@ struct wl1271 {
#define WL1271_FLAG_IRQ_PENDING (9)
#define WL1271_FLAG_IRQ_RUNNING (10)
#define WL1271_FLAG_IDLE (11)
+#define WL1271_FLAG_IDLE_REQUESTED (12)
unsigned long flags;
struct wl1271_partition_set part;
@@ -421,6 +397,7 @@ struct wl1271 {
/* Pending TX frames */
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
+ int tx_frames_cnt;
/* Security sequence number counters */
u8 tx_security_last_seq;
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 420e7e2fc021..4c87e601df2f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -75,7 +75,7 @@ struct acx_header {
/* payload length (not including headers */
__le16 len;
-} __attribute__ ((packed));
+} __packed;
struct acx_error_counter {
struct acx_header header;
@@ -98,7 +98,7 @@ struct acx_error_counter {
/* the number of missed sequence numbers in the squentially */
/* values of frames seq numbers */
__le32 seq_num_miss;
-} __attribute__ ((packed));
+} __packed;
struct acx_revision {
struct acx_header header;
@@ -127,7 +127,7 @@ struct acx_revision {
* bits 24 - 31: Chip ID - The WiLink chip ID.
*/
__le32 hw_version;
-} __attribute__ ((packed));
+} __packed;
enum wl1271_psm_mode {
/* Active mode */
@@ -149,7 +149,7 @@ struct acx_sleep_auth {
/* 2 - ELP mode: Deep / Max sleep*/
u8 sleep_auth;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum {
HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -187,7 +187,7 @@ struct acx_rx_msdu_lifetime {
* firmware discards the MSDU.
*/
__le32 lifetime;
-} __attribute__ ((packed));
+} __packed;
/*
* RX Config Options Table
@@ -275,13 +275,13 @@ struct acx_rx_config {
__le32 config_options;
__le32 filter_options;
-} __attribute__ ((packed));
+} __packed;
struct acx_packet_detection {
struct acx_header header;
__le32 threshold;
-} __attribute__ ((packed));
+} __packed;
enum acx_slot_type {
@@ -299,7 +299,7 @@ struct acx_slot {
u8 wone_index; /* Reserved */
u8 slot_time;
u8 reserved[6];
-} __attribute__ ((packed));
+} __packed;
#define ACX_MC_ADDRESS_GROUP_MAX (8)
@@ -312,21 +312,21 @@ struct acx_dot11_grp_addr_tbl {
u8 num_groups;
u8 pad[2];
u8 mac_table[ADDRESS_GROUP_MAX_LEN];
-} __attribute__ ((packed));
+} __packed;
struct acx_rx_timeout {
struct acx_header header;
__le16 ps_poll_timeout;
__le16 upsd_timeout;
-} __attribute__ ((packed));
+} __packed;
struct acx_rts_threshold {
struct acx_header header;
__le16 threshold;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_beacon_filter_option {
struct acx_header header;
@@ -342,7 +342,7 @@ struct acx_beacon_filter_option {
*/
u8 max_num_beacons;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
/*
* ACXBeaconFilterEntry (not 221)
@@ -383,21 +383,21 @@ struct acx_beacon_filter_ie_table {
u8 num_ie;
u8 pad[3];
u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct acx_conn_monit_params {
struct acx_header header;
__le32 synch_fail_thold; /* number of beacons missed */
__le32 bss_lose_timeout; /* number of TU's from synch fail */
-} __attribute__ ((packed));
+} __packed;
struct acx_bt_wlan_coex {
struct acx_header header;
u8 enable;
u8 pad[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_bt_wlan_coex_param {
struct acx_header header;
@@ -405,7 +405,7 @@ struct acx_bt_wlan_coex_param {
__le32 params[CONF_SG_PARAMS_MAX];
u8 param_idx;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_dco_itrim_params {
struct acx_header header;
@@ -413,7 +413,7 @@ struct acx_dco_itrim_params {
u8 enable;
u8 padding[3];
__le32 timeout;
-} __attribute__ ((packed));
+} __packed;
struct acx_energy_detection {
struct acx_header header;
@@ -422,7 +422,7 @@ struct acx_energy_detection {
__le16 rx_cca_threshold;
u8 tx_energy_detection;
u8 pad;
-} __attribute__ ((packed));
+} __packed;
struct acx_beacon_broadcast {
struct acx_header header;
@@ -436,14 +436,14 @@ struct acx_beacon_broadcast {
/* Consecutive PS Poll failures before updating the host */
u8 ps_poll_threshold;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_event_mask {
struct acx_header header;
__le32 event_mask;
__le32 high_event_mask; /* Unused */
-} __attribute__ ((packed));
+} __packed;
#define CFG_RX_FCS BIT(2)
#define CFG_RX_ALL_GOOD BIT(3)
@@ -488,14 +488,14 @@ struct acx_feature_config {
__le32 options;
__le32 data_flow_options;
-} __attribute__ ((packed));
+} __packed;
struct acx_current_tx_power {
struct acx_header header;
u8 current_tx_power;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_wake_up_condition {
struct acx_header header;
@@ -503,7 +503,7 @@ struct acx_wake_up_condition {
u8 wake_up_event; /* Only one bit can be set */
u8 listen_interval;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_aid {
struct acx_header header;
@@ -513,7 +513,7 @@ struct acx_aid {
*/
__le16 aid;
u8 pad[2];
-} __attribute__ ((packed));
+} __packed;
enum acx_preamble_type {
ACX_PREAMBLE_LONG = 0,
@@ -529,7 +529,7 @@ struct acx_preamble {
*/
u8 preamble;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum acx_ctsprotect_type {
CTSPROTECT_DISABLE = 0,
@@ -540,11 +540,11 @@ struct acx_ctsprotect {
struct acx_header header;
u8 ctsprotect;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct acx_tx_statistics {
__le32 internal_desc_overflow;
-} __attribute__ ((packed));
+} __packed;
struct acx_rx_statistics {
__le32 out_of_mem;
@@ -555,14 +555,14 @@ struct acx_rx_statistics {
__le32 xfr_hint_trig;
__le32 path_reset;
__le32 reset_counter;
-} __attribute__ ((packed));
+} __packed;
struct acx_dma_statistics {
__le32 rx_requested;
__le32 rx_errors;
__le32 tx_requested;
__le32 tx_errors;
-} __attribute__ ((packed));
+} __packed;
struct acx_isr_statistics {
/* host command complete */
@@ -621,7 +621,7 @@ struct acx_isr_statistics {
/* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
__le32 low_rssi;
-} __attribute__ ((packed));
+} __packed;
struct acx_wep_statistics {
/* WEP address keys configured */
@@ -643,7 +643,7 @@ struct acx_wep_statistics {
/* WEP decrypt interrupts */
__le32 interrupt;
-} __attribute__ ((packed));
+} __packed;
#define ACX_MISSED_BEACONS_SPREAD 10
@@ -703,12 +703,12 @@ struct acx_pwr_statistics {
/* the number of beacons in awake mode */
__le32 rcvd_awake_beacons;
-} __attribute__ ((packed));
+} __packed;
struct acx_mic_statistics {
__le32 rx_pkts;
__le32 calc_failure;
-} __attribute__ ((packed));
+} __packed;
struct acx_aes_statistics {
__le32 encrypt_fail;
@@ -717,7 +717,7 @@ struct acx_aes_statistics {
__le32 decrypt_packets;
__le32 encrypt_interrupt;
__le32 decrypt_interrupt;
-} __attribute__ ((packed));
+} __packed;
struct acx_event_statistics {
__le32 heart_beat;
@@ -728,7 +728,7 @@ struct acx_event_statistics {
__le32 oom_late;
__le32 phy_transmit_error;
__le32 tx_stuck;
-} __attribute__ ((packed));
+} __packed;
struct acx_ps_statistics {
__le32 pspoll_timeouts;
@@ -738,7 +738,7 @@ struct acx_ps_statistics {
__le32 pspoll_max_apturn;
__le32 pspoll_utilization;
__le32 upsd_utilization;
-} __attribute__ ((packed));
+} __packed;
struct acx_rxpipe_statistics {
__le32 rx_prep_beacon_drop;
@@ -746,7 +746,7 @@ struct acx_rxpipe_statistics {
__le32 beacon_buffer_thres_host_int_trig_rx_data;
__le32 missed_beacon_host_int_trig_rx_data;
__le32 tx_xfr_host_int_trig_rx_data;
-} __attribute__ ((packed));
+} __packed;
struct acx_statistics {
struct acx_header header;
@@ -762,7 +762,7 @@ struct acx_statistics {
struct acx_event_statistics event;
struct acx_ps_statistics ps;
struct acx_rxpipe_statistics rxpipe;
-} __attribute__ ((packed));
+} __packed;
struct acx_rate_class {
__le32 enabled_rates;
@@ -780,7 +780,7 @@ struct acx_rate_policy {
__le32 rate_class_cnt;
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
-} __attribute__ ((packed));
+} __packed;
struct acx_ac_cfg {
struct acx_header header;
@@ -790,7 +790,7 @@ struct acx_ac_cfg {
u8 aifsn;
u8 reserved;
__le16 tx_op_limit;
-} __attribute__ ((packed));
+} __packed;
struct acx_tid_config {
struct acx_header header;
@@ -801,19 +801,19 @@ struct acx_tid_config {
u8 ack_policy;
u8 padding[3];
__le32 apsd_conf[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_frag_threshold {
struct acx_header header;
__le16 frag_threshold;
u8 padding[2];
-} __attribute__ ((packed));
+} __packed;
struct acx_tx_config_options {
struct acx_header header;
__le16 tx_compl_timeout; /* msec */
__le16 tx_compl_threshold; /* number of packets */
-} __attribute__ ((packed));
+} __packed;
#define ACX_RX_MEM_BLOCKS 70
#define ACX_TX_MIN_MEM_BLOCKS 40
@@ -828,7 +828,7 @@ struct wl1271_acx_config_memory {
u8 num_stations;
u8 num_ssid_profiles;
__le32 total_tx_descriptors;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_acx_mem_map {
struct acx_header header;
@@ -872,7 +872,7 @@ struct wl1271_acx_mem_map {
u8 *rx_cbuf;
__le32 rx_ctrl;
__le32 tx_ctrl;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_acx_rx_config_opt {
struct acx_header header;
@@ -882,7 +882,7 @@ struct wl1271_acx_rx_config_opt {
__le16 timeout;
u8 queue_type;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_acx_bet_enable {
@@ -891,7 +891,7 @@ struct wl1271_acx_bet_enable {
u8 enable;
u8 max_consecutive;
u8 padding[2];
-} __attribute__ ((packed));
+} __packed;
#define ACX_IPV4_VERSION 4
#define ACX_IPV6_VERSION 6
@@ -905,7 +905,7 @@ struct wl1271_acx_arp_filter {
requests directed to this IP address will pass
through. For IPv4, the first four bytes are
used. */
-} __attribute__((packed));
+} __packed;
struct wl1271_acx_pm_config {
struct acx_header header;
@@ -913,14 +913,14 @@ struct wl1271_acx_pm_config {
__le32 host_clk_settling_time;
u8 host_fast_wakeup_support;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
struct wl1271_acx_keep_alive_mode {
struct acx_header header;
u8 enabled;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum {
ACX_KEEP_ALIVE_NO_TX = 0,
@@ -940,7 +940,7 @@ struct wl1271_acx_keep_alive_config {
u8 tpl_validation;
u8 trigger;
u8 padding;
-} __attribute__ ((packed));
+} __packed;
enum {
WL1271_ACX_TRIG_TYPE_LEVEL = 0,
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 19393e236e2c..530678e45a13 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -212,8 +212,8 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
- memcpy(gen_parms->params, wl->nvs->general_params,
- WL1271_NVS_GENERAL_PARAMS_SIZE);
+ memcpy(&gen_parms->general_params, &wl->nvs->general_params,
+ sizeof(struct wl1271_ini_general_params));
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
if (ret < 0)
@@ -238,13 +238,20 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
- memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params,
- WL1271_NVS_STAT_RADIO_PARAMS_SIZE);
- memcpy(radio_parms->dyn_radio_params,
- wl->nvs->dyn_radio_params[rparam->fem],
- WL1271_NVS_DYN_RADIO_PARAMS_SIZE);
-
- /* FIXME: current NVS is missing 5GHz parameters */
+ /* 2.4GHz parameters */
+ memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2,
+ sizeof(struct wl1271_ini_band_params_2));
+ memcpy(&radio_parms->dyn_params_2,
+ &wl->nvs->dyn_radio_params_2[rparam->fem].params,
+ sizeof(struct wl1271_ini_fem_params_2));
+
+ /* 5GHz parameters */
+ memcpy(&radio_parms->static_params_5,
+ &wl->nvs->stat_radio_params_5,
+ sizeof(struct wl1271_ini_band_params_5));
+ memcpy(&radio_parms->dyn_params_5,
+ &wl->nvs->dyn_radio_params_5[rparam->fem].params,
+ sizeof(struct wl1271_ini_fem_params_5));
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
@@ -329,12 +336,6 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->channel = wl->channel;
join->ssid_len = wl->ssid_len;
memcpy(join->ssid, wl->ssid, wl->ssid_len);
- join->ctrl = WL1271_JOIN_CMD_CTRL_TX_FLUSH;
-
- /* increment the session counter */
- wl->session_counter++;
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
@@ -517,7 +518,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
ps_params->send_null_data = send;
ps_params->retries = 5;
ps_params->hang_over_period = 1;
- ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
+ ps_params->null_data_rate = cpu_to_le32(wl->basic_rate_set);
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
@@ -567,7 +568,7 @@ out:
}
int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len, u8 active_scan,
+ struct cfg80211_scan_request *req, u8 active_scan,
u8 high_prio, u8 band, u8 probe_requests)
{
@@ -648,7 +649,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
}
ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
- ie, ie_len, ieee_band);
+ req->ie, req->ie_len, ieee_band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -684,7 +685,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
memcpy(wl->scan.ssid, ssid, ssid_len);
} else
wl->scan.ssid_len = 0;
- }
+ wl->scan.req = req;
+ } else
+ wl->scan.req = NULL;
}
ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index f2820b42a943..f5745d829c9b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -42,7 +42,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len, u8 active_scan,
+ struct cfg80211_scan_request *req, u8 active_scan,
u8 high_prio, u8 band, u8 probe_requests);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
void *buf, size_t buf_len, int index, u32 rates);
@@ -136,14 +136,14 @@ struct wl1271_cmd_header {
__le16 status;
/* payload */
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
#define WL1271_CMD_MAX_PARAMS 572
struct wl1271_command {
struct wl1271_cmd_header header;
u8 parameters[WL1271_CMD_MAX_PARAMS];
-} __attribute__ ((packed));
+} __packed;
enum {
CMD_MAILBOX_IDLE = 0,
@@ -196,7 +196,7 @@ struct cmd_read_write_memory {
of this field is the Host in WRITE command or the Wilink in READ
command. */
u8 value[MAX_READ_SIZE];
-} __attribute__ ((packed));
+} __packed;
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -243,14 +243,14 @@ struct wl1271_cmd_join {
u8 ssid[IW_ESSID_MAX_SIZE];
u8 ctrl; /* JOIN_CMD_CTRL_* */
u8 reserved[3];
-} __attribute__ ((packed));
+} __packed;
struct cmd_enabledisable_path {
struct wl1271_cmd_header header;
u8 channel;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
#define WL1271_RATE_AUTOMATIC 0
@@ -266,7 +266,7 @@ struct wl1271_cmd_template_set {
u8 aflags;
u8 reserved;
u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
#define TIM_ELE_ID 5
#define PARTIAL_VBM_MAX 251
@@ -278,7 +278,7 @@ struct wl1271_tim {
u8 dtim_period;
u8 bitmap_ctrl;
u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
-} __attribute__ ((packed));
+} __packed;
enum wl1271_cmd_ps_mode {
STATION_ACTIVE_MODE,
@@ -298,7 +298,7 @@ struct wl1271_cmd_ps_params {
*/
u8 hang_over_period;
__le32 null_data_rate;
-} __attribute__ ((packed));
+} __packed;
/* HW encryption keys */
#define NUM_ACCESS_CATEGORIES_COPY 4
@@ -348,7 +348,7 @@ struct wl1271_cmd_set_keys {
u8 key[MAX_KEY_SIZE];
__le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
-} __attribute__ ((packed));
+} __packed;
#define WL1271_SCAN_MAX_CHANNELS 24
@@ -385,7 +385,7 @@ struct basic_scan_params {
u8 use_ssid_list;
u8 scan_tag;
u8 padding2;
-} __attribute__ ((packed));
+} __packed;
struct basic_scan_channel_params {
/* Duration in TU to wait for frames on a channel for active scan */
@@ -400,25 +400,25 @@ struct basic_scan_channel_params {
u8 dfs_candidate;
u8 activity_detected;
u8 pad;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_cmd_scan {
struct wl1271_cmd_header header;
struct basic_scan_params params;
struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
-} __attribute__ ((packed));
+} __packed;
struct wl1271_cmd_trigger_scan_to {
struct wl1271_cmd_header header;
__le32 timeout;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_cmd_test_header {
u8 id;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
enum wl1271_channel_tune_bands {
WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -439,25 +439,31 @@ struct wl1271_general_parms_cmd {
struct wl1271_cmd_test_header test;
- u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE];
- s8 reserved[23];
-} __attribute__ ((packed));
+ struct wl1271_ini_general_params general_params;
-#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29
-#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104
+ u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
+ u8 padding[3];
+} __packed;
struct wl1271_radio_parms_cmd {
struct wl1271_cmd_header header;
struct wl1271_cmd_test_header test;
- u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE];
- u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE];
+ /* Static radio parameters */
+ struct wl1271_ini_band_params_2 static_params_2;
+ struct wl1271_ini_band_params_5 static_params_5;
- u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE];
- u8 reserved;
- u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE];
-} __attribute__ ((packed));
+ /* Dynamic radio parameters */
+ struct wl1271_ini_fem_params_2 dyn_params_2;
+ u8 padding2;
+ struct wl1271_ini_fem_params_5 dyn_params_5;
+ u8 padding3[2];
+} __packed;
struct wl1271_cmd_cal_channel_tune {
struct wl1271_cmd_header header;
@@ -468,7 +474,7 @@ struct wl1271_cmd_cal_channel_tune {
u8 channel;
__le16 radio_status;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_cmd_cal_update_ref_point {
struct wl1271_cmd_header header;
@@ -479,7 +485,7 @@ struct wl1271_cmd_cal_update_ref_point {
__le32 ref_detector;
u8 sub_band;
u8 padding[3];
-} __attribute__ ((packed));
+} __packed;
#define MAX_TLV_LENGTH 400
#define MAX_NVS_VERSION_LENGTH 12
@@ -501,7 +507,7 @@ struct wl1271_cmd_cal_p2g {
u8 sub_band_mask;
u8 padding2;
-} __attribute__ ((packed));
+} __packed;
/*
@@ -529,6 +535,6 @@ struct wl1271_cmd_disconnect {
u8 type;
u8 padding;
-} __attribute__ ((packed));
+} __packed;
#endif /* __WL1271_CMD_H__ */
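Editor's note: the bulk of the wl1271_cmd.h hunks above is a mechanical swap of the long-hand GCC attribute for the kernel's __packed shorthand; the command structures keep the same on-wire layout. A standalone sketch of the equivalence, assuming only that __packed expands to the packed attribute as in the kernel's compiler headers (the struct below is a stand-in, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))	/* what the kernel compiler headers provide */

struct hdr_attr {				/* long-hand spelling, as removed above */
	uint16_t id;
	uint16_t status;
	uint8_t data[0];
} __attribute__ ((packed));

struct hdr_short {				/* shorthand spelling, as added above */
	uint16_t id;
	uint16_t status;
	uint8_t data[0];
} __packed;

int main(void)
{
	/* Same attribute, same layout: both collapse to 4 bytes here. */
	assert(sizeof(struct hdr_attr) == sizeof(struct hdr_short));
	printf("%zu == %zu\n", sizeof(struct hdr_attr), sizeof(struct hdr_short));
	return 0;
}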
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index cf37aa6eb137..ca52cdec7a8f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -43,11 +43,11 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
/* FIXME: ie missing! */
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
- NULL, 0,
- wl->scan.active,
- wl->scan.high_prio,
- WL1271_SCAN_BAND_5_GHZ,
- wl->scan.probe_requests);
+ wl->scan.req,
+ wl->scan.active,
+ wl->scan.high_prio,
+ WL1271_SCAN_BAND_5_GHZ,
+ wl->scan.probe_requests);
} else {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 58371008f270..43d5aeae1783 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -85,7 +85,7 @@ struct event_debug_report {
__le32 report_1;
__le32 report_2;
__le32 report_3;
-} __attribute__ ((packed));
+} __packed;
#define NUM_OF_RSSI_SNR_TRIGGERS 8
@@ -116,7 +116,7 @@ struct event_mailbox {
u8 ps_status;
u8 reserved_5[29];
-} __attribute__ ((packed));
+} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/wl1271_ini.h
new file mode 100644
index 000000000000..2313047d4015
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_ini.h
@@ -0,0 +1,123 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_INI_H__
+#define __WL1271_INI_H__
+
+#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
+
+struct wl1271_ini_general_params {
+ u8 ref_clock;
+ u8 settling_time;
+ u8 clk_valid_on_wakeup;
+ u8 dc2dc_mode;
+ u8 dual_mode_select;
+ u8 tx_bip_fem_auto_detect;
+ u8 tx_bip_fem_manufacturer;
+ u8 general_settings;
+ u8 sr_state;
+ u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+ u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+} __packed;
+
+#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
+
+struct wl1271_ini_band_params_2 {
+ u8 rx_trace_insertion_loss;
+ u8 tx_trace_loss;
+ u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
+#define WL1271_INI_RATE_GROUP_COUNT 6
+#define WL1271_INI_CHANNEL_COUNT_2 14
+
+struct wl1271_ini_fem_params_2 {
+ __le16 tx_bip_ref_pd_voltage;
+ u8 tx_bip_ref_power;
+ u8 tx_bip_ref_offset;
+ u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2];
+ u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2];
+ u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
+ u8 rx_fem_insertion_loss;
+ u8 degraded_low_to_normal_thr;
+ u8 normal_to_degraded_high_thr;
+} __packed;
+
+#define WL1271_INI_CHANNEL_COUNT_5 35
+#define WL1271_INI_SUB_BAND_COUNT_5 7
+
+struct wl1271_ini_band_params_5 {
+ u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
+} __packed;
+
+struct wl1271_ini_fem_params_5 {
+ __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 tx_per_rate_pwr_limits_normal[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_degraded[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_rate_pwr_limits_extreme[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5];
+ u8 tx_pd_vs_rate_offsets[WL1271_INI_RATE_GROUP_COUNT];
+ u8 tx_ibias[WL1271_INI_RATE_GROUP_COUNT];
+ u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
+ u8 degraded_low_to_normal_thr;
+ u8 normal_to_degraded_high_thr;
+} __packed;
+
+
+/* NVS data structure */
+#define WL1271_INI_NVS_SECTION_SIZE 468
+#define WL1271_INI_FEM_MODULE_COUNT 2
+
+#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
+
+struct wl1271_nvs_file {
+ /* NVS section */
+ u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
+
+ /* INI section */
+ struct wl1271_ini_general_params general_params;
+ u8 padding1;
+ struct wl1271_ini_band_params_2 stat_radio_params_2;
+ u8 padding2;
+ struct {
+ struct wl1271_ini_fem_params_2 params;
+ u8 padding;
+ } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ struct wl1271_ini_band_params_5 stat_radio_params_5;
+ u8 padding3;
+ struct {
+ struct wl1271_ini_fem_params_5 params;
+ u8 padding;
+ } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+} __packed;
+
+#endif
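Editor's note: WL1271_INI_LEGACY_NVS_FILE_SIZE marks the older 800-byte NVS images that lack the 5 GHz sections defined above; wl1271_fetch_nvs() and the testmode NVS push in the following hunks accept them only while 802.11a support is disabled. A hedged userspace sketch of that acceptance rule (everything except the 800-byte constant is illustrative; 912 merely stands in for sizeof(struct wl1271_nvs_file)):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
/* Stand-in for sizeof(struct wl1271_nvs_file); the real value follows from the struct above. */
#define FULL_NVS_SIZE 912

static bool nvs_size_ok(size_t len, bool eleven_a_enabled)
{
	if (len == FULL_NVS_SIZE)
		return true;
	/* Legacy images carry no 5 GHz data, so only take them with 11a off. */
	return len == WL1271_INI_LEGACY_NVS_FILE_SIZE && !eleven_a_enabled;
}

int main(void)
{
	printf("legacy, 11a off: %d\n", nvs_size_ok(800, false));	/* 1: accepted */
	printf("legacy, 11a on:  %d\n", nvs_size_ok(800, true));	/* 0: rejected */
	printf("full image:      %d\n", nvs_size_ok(912, true));	/* 1: accepted */
	return 0;
}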
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b7d9137851ac..7a14da506d78 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -566,14 +566,21 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
return ret;
}
- if (fw->size != sizeof(struct wl1271_nvs_file)) {
+ /*
+ * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
+ * configurations) can be removed when those NVS files stop floating
+ * around.
+ */
+ if (fw->size != sizeof(struct wl1271_nvs_file) &&
+ (fw->size != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+ wl1271_11a_enabled())) {
wl1271_error("nvs size is not as expected: %zu != %zu",
fw->size, sizeof(struct wl1271_nvs_file));
ret = -EILSEQ;
goto out;
}
- wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
@@ -581,7 +588,7 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
goto out;
}
- memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
+ memcpy(wl->nvs, fw->data, fw->size);
out:
release_firmware(fw);
@@ -1044,7 +1051,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_flush(wl);
+ wl1271_tx_reset(wl);
wl1271_power_off(wl);
memset(wl->bssid, 0, ETH_ALEN);
@@ -1241,6 +1248,42 @@ static u32 wl1271_min_rate_get(struct wl1271 *wl)
return rate;
}
+static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+{
+ int ret;
+
+ if (idle) {
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_unjoin(wl);
+ if (ret < 0)
+ goto out;
+ }
+ wl->rate_set = wl1271_min_rate_get(wl);
+ wl->sta_rate_set = 0;
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out;
+ ret = wl1271_acx_keep_alive_config(
+ wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ goto out;
+ set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ } else {
+ /* increment the session counter */
+ wl->session_counter++;
+ if (wl->session_counter >= SESSION_COUNTER_MAX)
+ wl->session_counter = 0;
+ ret = wl1271_dummy_join(wl);
+ if (ret < 0)
+ goto out;
+ clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+ }
+
+out:
+ return ret;
+}
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1255,6 +1298,15 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
conf->power_level,
conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+ /*
+ * mac80211 will go to idle nearly immediately after transmitting some
+ * frames, such as the deauth. To make sure those frames reach the air,
+ * wait here until the TX queue is fully flushed.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_tx_flush(wl);
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -1295,22 +1347,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- if (conf->flags & IEEE80211_CONF_IDLE &&
- test_bit(WL1271_FLAG_JOINED, &wl->flags))
- wl1271_unjoin(wl);
- else if (!(conf->flags & IEEE80211_CONF_IDLE))
- wl1271_dummy_join(wl);
-
- if (conf->flags & IEEE80211_CONF_IDLE) {
- wl->rate_set = wl1271_min_rate_get(wl);
- wl->sta_rate_set = 0;
- wl1271_acx_rate_policies(wl);
- wl1271_acx_keep_alive_config(
- wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
- ACX_KEEP_ALIVE_TPL_INVALID);
- set_bit(WL1271_FLAG_IDLE, &wl->flags);
- } else
- clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+ ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+ if (ret < 0)
+ wl1271_warning("idle mode change failed %d", ret);
}
if (conf->flags & IEEE80211_CONF_PS &&
@@ -1595,13 +1634,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl1271_11a_enabled())
- ret = wl1271_cmd_scan(hw->priv, ssid, len,
- req->ie, req->ie_len, 1, 0,
- WL1271_SCAN_BAND_DUAL, 3);
+ ret = wl1271_cmd_scan(hw->priv, ssid, len, req,
+ 1, 0, WL1271_SCAN_BAND_DUAL, 3);
else
- ret = wl1271_cmd_scan(hw->priv, ssid, len,
- req->ie, req->ie_len, 1, 0,
- WL1271_SCAN_BAND_2_4_GHZ, 3);
+ ret = wl1271_cmd_scan(hw->priv, ssid, len, req,
+ 1, 0, WL1271_SCAN_BAND_2_4_GHZ, 3);
wl1271_ps_elp_sleep(wl);
@@ -1991,7 +2028,7 @@ static struct ieee80211_channel wl1271_channels[] = {
};
/* mapping to indexes for wl1271_rates */
-const static u8 wl1271_rate_to_idx_2ghz[] = {
+static const u8 wl1271_rate_to_idx_2ghz[] = {
/* MCS rates are used only with 11n */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2103,7 +2140,7 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
};
/* mapping to indexes for wl1271_rates_5ghz */
-const static u8 wl1271_rate_to_idx_5ghz[] = {
+static const u8 wl1271_rate_to_idx_5ghz[] = {
/* MCS rates are used only with 11n */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
@@ -2139,7 +2176,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
-const static u8 *wl1271_band_rate_to_idx[] = {
+static const u8 *wl1271_band_rate_to_idx[] = {
[IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
[IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
};
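Editor's note: besides the functional changes, the rate-mapping tables above move from "const static" to "static const". Both are legal C, but a storage-class specifier that is not first among the declaration specifiers is an obsolescent feature (C99 6.11.5), and gcc flags it under -Wold-style-declaration. A two-line illustration (the array contents are made up):

#include <stdio.h>

const static unsigned char idx_old[] = { 0, 1, 2 };	/* obsolescent order; gcc -Wextra warns */
static const unsigned char idx_new[] = { 0, 1, 2 };	/* preferred: storage class first */

int main(void)
{
	printf("%u %u\n", idx_old[2], idx_new[2]);
	return 0;
}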
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index b89be4758e78..13a232333b13 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -113,7 +113,7 @@ struct wl1271_rx_descriptor {
u8 process_id;
u8 pad_len;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
index d3d6f302f705..7059b5cccf0f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -28,7 +28,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
-#include <plat/gpio.h>
+#include <linux/gpio.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 554deb4d024e..6e0952f79e9a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -199,7 +199,14 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
len = nla_len(tb[WL1271_TM_ATTR_DATA]);
- if (len != sizeof(struct wl1271_nvs_file)) {
+ /*
+ * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
+ * configurations) can be removed when those NVS files stop floating
+ * around.
+ */
+ if (len != sizeof(struct wl1271_nvs_file) &&
+ (len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+ wl1271_11a_enabled())) {
wl1271_error("nvs size is not as expected: %zu != %zu",
len, sizeof(struct wl1271_nvs_file));
return -EMSGSIZE;
@@ -209,7 +216,7 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
kfree(wl->nvs);
- wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 62db79508ddf..c592cc2e9fe8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -36,6 +36,7 @@ static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] == NULL) {
wl->tx_frames[i] = skb;
+ wl->tx_frames_cnt++;
return i;
}
@@ -73,8 +74,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
wl1271_debug(DEBUG_TX,
"tx_allocate: size: %d, blocks: %d, id: %d",
total_len, total_blocks, id);
- } else
+ } else {
wl->tx_frames[id] = NULL;
+ wl->tx_frames_cnt--;
+ }
return ret;
}
@@ -358,6 +361,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
/* return the packet to the stack */
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[result->id] = NULL;
+ wl->tx_frames_cnt--;
}
/* Called upon reception of a TX complete interrupt */
@@ -412,7 +416,7 @@ void wl1271_tx_complete(struct wl1271 *wl)
}
/* caller must hold wl->mutex */
-void wl1271_tx_flush(struct wl1271 *wl)
+void wl1271_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
@@ -421,7 +425,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
/* control->flags = 0; FIXME */
while ((skb = skb_dequeue(&wl->tx_queue))) {
- wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
ieee80211_tx_status(wl->hw, skb);
}
@@ -429,6 +433,32 @@ void wl1271_tx_flush(struct wl1271 *wl)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
wl->tx_frames[i] = NULL;
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
ieee80211_tx_status(wl->hw, skb);
}
+ wl->tx_frames_cnt = 0;
+}
+
+#define WL1271_TX_FLUSH_TIMEOUT 500000
+
+/* caller must *NOT* hold wl->mutex */
+void wl1271_tx_flush(struct wl1271 *wl)
+{
+ unsigned long timeout;
+ timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+
+ while (!time_after(jiffies, timeout)) {
+ mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
+ wl->tx_frames_cnt);
+ if ((wl->tx_frames_cnt == 0) &&
+ skb_queue_empty(&wl->tx_queue)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ mutex_unlock(&wl->mutex);
+ msleep(1);
+ }
+
+ wl1271_warning("Unable to flush all TX buffers, timed out.");
}
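Editor's note: wl1271_tx_flush() keeps its name but changes meaning here. The old drop-everything behaviour moves to wl1271_tx_reset(), while the new flush polls tx_frames_cnt and the TX queue until both drain or the 500 ms budget expires, taking wl->mutex on every iteration; that is why the caller in wl1271_op_config() runs it before locking the mutex itself. A userspace sketch of the same poll-under-lock-with-timeout shape (pthreads stand in for the kernel primitives and the frame counter is simulated, so this is not driver code):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int tx_frames_cnt = 3;			/* pretend three frames are in flight */

static void tx_flush(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		pthread_mutex_lock(&lock);
		if (tx_frames_cnt == 0) {
			pthread_mutex_unlock(&lock);
			return;			/* everything reached the air */
		}
		tx_frames_cnt--;		/* simulate one completion per poll */
		pthread_mutex_unlock(&lock);

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > start.tv_sec) {	/* crude ~1 s budget; the driver uses 500 ms */
			fprintf(stderr, "flush timed out\n");
			return;
		}
		usleep(1000);			/* the driver's msleep(1) */
	}
}

int main(void)
{
	tx_flush();				/* must be called without the lock held */
	printf("%d frames left\n", tx_frames_cnt);
	return 0;
}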
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 3b8b7ac253fd..48bf92621c03 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -80,7 +80,7 @@ struct wl1271_tx_hw_descr {
/* Identifier of the remote STA in IBSS, 1 in infra-BSS */
u8 aid;
u8 reserved;
-} __attribute__ ((packed));
+} __packed;
enum wl1271_tx_hw_res_status {
TX_SUCCESS = 0,
@@ -115,13 +115,13 @@ struct wl1271_tx_hw_res_descr {
u8 rate_class_index;
/* for 4-byte alignment. */
u8 spare;
-} __attribute__ ((packed));
+} __packed;
struct wl1271_tx_hw_res_if {
__le32 tx_result_fw_counter;
__le32 tx_result_host_counter;
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
-} __attribute__ ((packed));
+} __packed;
static inline int wl1271_tx_get_queue(int queue)
{
@@ -158,6 +158,7 @@ static inline int wl1271_tx_ac_to_tid(int ac)
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_complete(struct wl1271 *wl);
+void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 055d7bc6f592..184628027213 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -66,41 +66,41 @@ struct ieee80211_header {
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_ie_header {
u8 id;
u8 len;
-} __attribute__ ((packed));
+} __packed;
/* IEs */
struct wl12xx_ie_ssid {
struct wl12xx_ie_header header;
char ssid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_ie_rates {
struct wl12xx_ie_header header;
u8 rates[MAX_SUPPORTED_RATES];
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_ie_ds_params {
struct wl12xx_ie_header header;
u8 channel;
-} __attribute__ ((packed));
+} __packed;
struct country_triplet {
u8 channel;
u8 num_channels;
u8 max_tx_power;
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
u8 country_string[COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
-} __attribute__ ((packed));
+} __packed;
/* Templates */
@@ -115,30 +115,30 @@ struct wl12xx_beacon_template {
struct wl12xx_ie_rates ext_rates;
struct wl12xx_ie_ds_params ds_params;
struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_null_data_template {
struct ieee80211_header header;
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_ps_poll_template {
__le16 fc;
__le16 aid;
u8 bssid[ETH_ALEN];
u8 ta[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_qos_null_data_template {
struct ieee80211_header header;
__le16 qos_ctl;
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_probe_req_template {
struct ieee80211_header header;
struct wl12xx_ie_ssid ssid;
struct wl12xx_ie_rates rates;
struct wl12xx_ie_rates ext_rates;
-} __attribute__ ((packed));
+} __packed;
struct wl12xx_probe_resp_template {
@@ -151,6 +151,6 @@ struct wl12xx_probe_resp_template {
struct wl12xx_ie_rates ext_rates;
struct wl12xx_ie_ds_params ds_params;
struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
#endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8816e371fd0e..3fbfd19818f1 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,12 +231,12 @@ struct iw_mgmt_info_element {
but sizeof(enum) > sizeof(u8) :-( */
u8 len;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
struct iw_mgmt_essid_pset {
struct iw_mgmt_info_element el;
u8 essid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
/*
* According to 802.11 Wireless Networks, the definitive guide - O'Reilly
@@ -247,12 +247,12 @@ struct iw_mgmt_essid_pset {
struct iw_mgmt_data_rset {
struct iw_mgmt_info_element el;
u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS];
-} __attribute__ ((packed));
+} __packed;
struct iw_mgmt_ds_pset {
struct iw_mgmt_info_element el;
u8 chan;
-} __attribute__ ((packed));
+} __packed;
struct iw_mgmt_cf_pset {
struct iw_mgmt_info_element el;
@@ -260,12 +260,12 @@ struct iw_mgmt_cf_pset {
u8 cfp_period;
u16 cfp_max_duration;
u16 cfp_dur_remaining;
-} __attribute__ ((packed));
+} __packed;
struct iw_mgmt_ibss_pset {
struct iw_mgmt_info_element el;
u16 atim_window;
-} __attribute__ ((packed));
+} __packed;
struct wl3501_tx_hdr {
u16 tx_cnt;
@@ -544,12 +544,12 @@ struct wl3501_80211_tx_plcp_hdr {
u8 service;
u16 len;
u16 crc16;
-} __attribute__ ((packed));
+} __packed;
struct wl3501_80211_tx_hdr {
struct wl3501_80211_tx_plcp_hdr pclp_hdr;
struct ieee80211_hdr mac_hdr;
-} __attribute__ ((packed));
+} __packed;
/*
Reserve the beginning Tx space for descriptor use.
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index b0b666019a93..43307bd42a69 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -42,7 +42,8 @@ static struct zd_reg_alpha2_map reg_alpha2_map[] = {
{ ZD_REGDOMAIN_IC, "CA" },
{ ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
{ ZD_REGDOMAIN_JAPAN, "JP" },
- { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
+ { ZD_REGDOMAIN_JAPAN_2, "JP" },
+ { ZD_REGDOMAIN_JAPAN_3, "JP" },
{ ZD_REGDOMAIN_SPAIN, "ES" },
{ ZD_REGDOMAIN_FRANCE, "FR" },
};
@@ -855,7 +856,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
if (skb == NULL)
return -ENOMEM;
if (need_padding) {
- /* Make sure the the payload data is 4 byte aligned. */
+ /* Make sure the payload data is 4 byte aligned. */
skb_reserve(skb, 2);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 630c298a730e..a6d86b996c79 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -35,7 +35,7 @@ struct zd_ctrlset {
__le16 current_length;
u8 service;
__le16 next_frame_length;
-} __attribute__((packed));
+} __packed;
#define ZD_CS_RESERVED_SIZE 25
@@ -106,7 +106,7 @@ struct zd_ctrlset {
struct rx_length_info {
__le16 length[3];
__le16 tag;
-} __attribute__((packed));
+} __packed;
#define RX_LENGTH_INFO_TAG 0x697e
@@ -117,7 +117,7 @@ struct rx_status {
u8 signal_quality_ofdm;
u8 decryption_type;
u8 frame_status;
-} __attribute__((packed));
+} __packed;
/* rx_status field decryption_type */
#define ZD_RX_NO_WEP 0
@@ -153,7 +153,7 @@ struct tx_status {
u8 mac[ETH_ALEN];
u8 retry;
u8 failure;
-} __attribute__((packed));
+} __packed;
enum mac_flags {
MAC_FIXED_CHANNEL = 0x01,
@@ -212,8 +212,9 @@ struct zd_mac {
#define ZD_REGDOMAIN_ETSI 0x30
#define ZD_REGDOMAIN_SPAIN 0x31
#define ZD_REGDOMAIN_FRANCE 0x32
-#define ZD_REGDOMAIN_JAPAN_ADD 0x40
+#define ZD_REGDOMAIN_JAPAN_2 0x40
#define ZD_REGDOMAIN_JAPAN 0x41
+#define ZD_REGDOMAIN_JAPAN_3 0x49
enum {
MIN_CHANNEL24 = 1,
@@ -225,7 +226,7 @@ enum {
struct ofdm_plcp_header {
u8 prefix[3];
__le16 service;
-} __attribute__((packed));
+} __packed;
static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
{
@@ -252,7 +253,7 @@ struct cck_plcp_header {
u8 service;
__le16 length;
__le16 crc16;
-} __attribute__((packed));
+} __packed;
static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
{
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index c257940b71b6..818e1480ca93 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -844,7 +844,7 @@ out:
* @usb: a &struct zd_usb pointer
* @urb: URB to be freed
*
- * Frees the the transmission URB, which means to put it on the free URB
+ * Frees the transmission URB, which means to put it on the free URB
* list.
*/
static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 049f8b91f020..1b1655cb7cb4 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -79,17 +79,17 @@ enum control_requests {
struct usb_req_read_regs {
__le16 id;
__le16 addr[0];
-} __attribute__((packed));
+} __packed;
struct reg_data {
__le16 addr;
__le16 value;
-} __attribute__((packed));
+} __packed;
struct usb_req_write_regs {
__le16 id;
struct reg_data reg_writes[0];
-} __attribute__((packed));
+} __packed;
enum {
RF_IF_LE = 0x02,
@@ -106,7 +106,7 @@ struct usb_req_rfwrite {
/* RF2595: 24 */
__le16 bit_values[0];
/* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
-} __attribute__((packed));
+} __packed;
/* USB interrupt */
@@ -123,12 +123,12 @@ enum usb_int_flags {
struct usb_int_header {
u8 type; /* must always be 1 */
u8 id;
-} __attribute__((packed));
+} __packed;
struct usb_int_regs {
struct usb_int_header hdr;
struct reg_data regs[0];
-} __attribute__((packed));
+} __packed;
struct usb_int_retry_fail {
struct usb_int_header hdr;
@@ -136,7 +136,7 @@ struct usb_int_retry_fail {
u8 _dummy;
u8 addr[ETH_ALEN];
u8 ibss_wakeup_dest;
-} __attribute__((packed));
+} __packed;
struct read_regs_int {
struct completion completion;
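Editor's note: several of the zd_usb request structures above end in a zero-length array (addr[0], reg_writes[0], bit_values[0]): the fixed header is followed by a caller-chosen number of entries, and the buffer handed to the USB layer is sized accordingly. A userspace sketch of that sizing arithmetic, using stand-in copies of two of the structs (the request id and register values are arbitrary demo data):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reg_data {
	uint16_t addr;
	uint16_t value;
} __attribute__((packed));

struct usb_req_write_regs {
	uint16_t id;
	struct reg_data reg_writes[0];
} __attribute__((packed));

int main(void)
{
	size_t n = 4;
	size_t len = sizeof(struct usb_req_write_regs) + n * sizeof(struct reg_data);
	struct usb_req_write_regs *req = calloc(1, len);

	if (!req)
		return 1;
	req->id = 0x21;					/* arbitrary id for the demo */
	for (size_t i = 0; i < n; i++) {
		req->reg_writes[i].addr = (uint16_t)i;
		req->reg_writes[i].value = 0xffff;
	}
	printf("request length: %zu bytes\n", len);	/* 2-byte header + 4 * 4-byte entries = 18 */
	free(req);
	return 0;
}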
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d504e2b60257..b50fedcef8ac 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev,
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ netif_notify_peers(netdev);
break;
case XenbusStateClosing:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 7a44c38aaf65..d79892782a2b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -40,11 +40,7 @@
*/
enum qeth_dbf_names {
QETH_DBF_SETUP,
- QETH_DBF_QERR,
- QETH_DBF_TRACE,
QETH_DBF_MSG,
- QETH_DBF_SENSE,
- QETH_DBF_MISC,
QETH_DBF_CTRL,
QETH_DBF_INFOS /* must be last element */
};
@@ -71,7 +67,19 @@ struct qeth_dbf_info {
debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
#define QETH_DBF_TEXT_(name, level, text...) \
- qeth_dbf_longtext(QETH_DBF_##name, level, text)
+ qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+ debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+ debug_event(card->debug, level, (void *)(addr), len)
+
+#define QETH_CARD_MESSAGE(card, text...) \
+ debug_sprintf_event(card->debug, level, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+ qeth_dbf_longtext(card->debug, level, text)
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
@@ -738,6 +746,7 @@ struct qeth_card {
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
+ debug_info_t *debug;
struct mutex conf_mutex;
};
@@ -857,9 +866,10 @@ void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
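Editor's note: alongside the per-card debug macros, this header gains the qeth_hdr_chk_and_bounce() declaration, and qeth_core_main.c below recomputes buffer element counts from the pages the linear skb data actually spans, using PFN_UP()/PFN_DOWN(), plus one element per page fragment. A worked userspace example of that page-span arithmetic (PAGE_SHIFT and the sample addresses are assumptions for the demo; the PFN macros follow their kernel definitions):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* Pages spanned by [data, data + dlen), as in the reworked qeth_get_elements_no(). */
static unsigned long linear_elements(unsigned long data, unsigned long dlen)
{
	return PFN_UP(data + dlen - 1) - PFN_DOWN(data);
}

int main(void)
{
	/* 300 bytes starting near the end of a page cross into the next one: 2 elements. */
	printf("%lu\n", linear_elements(0x1f40, 300));
	/* The same 300 bytes starting page-aligned stay within one page: 1 element. */
	printf("%lu\n", linear_elements(0x2000, 300));
	return 0;
}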
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 13ef46b9d388..b7019066c303 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -32,16 +32,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* N P A M L V H */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
- [QETH_DBF_QERR] = {"qeth_qerr",
- 2, 1, 8, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_TRACE] = {"qeth_trace",
- 4, 1, 8, 3, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg",
8, 1, 128, 3, &debug_sprintf_view, NULL},
- [QETH_DBF_SENSE] = {"qeth_sense",
- 2, 1, 64, 2, &debug_hex_ascii_view, NULL},
- [QETH_DBF_MISC] = {"qeth_misc",
- 2, 1, 256, 2, &debug_hex_ascii_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
@@ -65,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
- struct qdio_buffer *buffer, int is_tso,
- int *next_element_to_fill)
-{
- struct skb_frag_struct *frag;
- int fragno;
- unsigned long addr;
- int element, cnt, dlen;
-
- fragno = skb_shinfo(skb)->nr_frags;
- element = *next_element_to_fill;
- dlen = 0;
-
- if (is_tso)
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_FIRST_FRAG;
- dlen = skb->len - skb->data_len;
- if (dlen) {
- buffer->element[element].addr = skb->data;
- buffer->element[element].length = dlen;
- element++;
- }
- for (cnt = 0; cnt < fragno; cnt++) {
- frag = &skb_shinfo(skb)->frags[cnt];
- addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
- frag->page_offset;
- buffer->element[element].addr = (char *)addr;
- buffer->element[element].length = frag->size;
- if (cnt < (fragno - 1))
- buffer->element[element].flags =
- SBAL_FLAGS_MIDDLE_FRAG;
- else
- buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
- element++;
- }
- *next_element_to_fill = element;
-}
-
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
@@ -232,7 +182,7 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
- QETH_DBF_TEXT(TRACE, 5, "clwrklst");
+ QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
@@ -246,7 +196,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
void *ptr;
int i, j;
- QETH_DBF_TEXT(TRACE, 5, "alocpool");
+ QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
if (!pool_entry) {
@@ -273,7 +223,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
- QETH_DBF_TEXT(TRACE, 2, "realcbp");
+ QETH_CARD_TEXT(card, 2, "realcbp");
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER))
@@ -293,7 +243,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 5, "issnxrd");
+ QETH_CARD_TEXT(card, 5, "issnxrd");
if (card->read.state != CH_STATE_UP)
return -EIO;
iob = qeth_get_buffer(&card->read);
@@ -305,7 +255,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
return -ENOMEM;
}
qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
(addr_t) iob, 0, 0);
if (rc) {
@@ -364,7 +314,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd = NULL;
- QETH_DBF_TEXT(TRACE, 5, "chkipad");
+ QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
@@ -400,10 +350,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_MODCCID:
return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "irla");
+ QETH_CARD_TEXT(card, 3, "irla");
break;
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
- QETH_DBF_TEXT(TRACE, 3, "urla");
+ QETH_CARD_TEXT(card, 3, "urla");
break;
default:
QETH_DBF_MESSAGE(2, "Received data is IPA "
@@ -420,7 +370,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
struct qeth_reply *reply, *r;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clipalst");
+ QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
@@ -448,9 +398,9 @@ static int qeth_check_idx_response(struct qeth_card *card,
buffer[4],
((buffer[4] == 0x22) ?
" -- try another portname" : ""));
- QETH_DBF_TEXT(TRACE, 2, "ckidxres");
- QETH_DBF_TEXT(TRACE, 2, " idxterm");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckidxres");
+ QETH_CARD_TEXT(card, 2, " idxterm");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
if (buffer[4] == 0xf6) {
dev_err(&card->gdev->dev,
"The qeth device is not configured "
@@ -467,8 +417,8 @@ static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "setupccw");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "setupccw");
if (channel == &card->read)
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
else
@@ -481,7 +431,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
- QETH_DBF_TEXT(TRACE, 6, "getbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
index = channel->io_buf_no;
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
@@ -502,7 +452,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
{
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 6, "relbuff");
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
spin_lock_irqsave(&channel->iob_lock, flags);
memset(iob->data, 0, QETH_BUFSIZE);
iob->state = BUF_STATE_FREE;
@@ -553,9 +503,8 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
int keep_reply;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
-
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
switch (rc) {
case 0:
@@ -563,6 +512,7 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
case -EIO:
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
+ /* fall through */
default:
goto out;
}
@@ -722,7 +672,7 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 2, "startrec");
+ QETH_CARD_TEXT(card, 2, "startrec");
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
@@ -732,15 +682,17 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
int dstat, cstat;
char *sense;
+ struct qeth_card *card;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ card = CARD_FROM_CDEV(cdev);
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
- QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
+ QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
@@ -753,23 +705,23 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "REVIND");
+ QETH_CARD_TEXT(card, 2, "REVIND");
return 1;
}
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
- QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
+ QETH_CARD_TEXT(card, 2, "CMDREJi");
return 1;
}
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
- QETH_DBF_TEXT(TRACE, 2, "AFFE");
+ QETH_CARD_TEXT(card, 2, "AFFE");
return 1;
}
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
- QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
+ QETH_CARD_TEXT(card, 2, "ZEROSEN");
return 0;
}
- QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
+ QETH_CARD_TEXT(card, 2, "DGENCHK");
return 1;
}
return 0;
@@ -778,6 +730,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
static long __qeth_check_irb_error(struct ccw_device *cdev,
unsigned long intparm, struct irb *irb)
{
+ struct qeth_card *card;
+
+ card = CARD_FROM_CDEV(cdev);
+
if (!IS_ERR(irb))
return 0;
@@ -785,17 +741,15 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
case -EIO:
QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
dev_name(&cdev->dev));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
case -ETIMEDOUT:
dev_warn(&cdev->dev, "A hardware operation timed out"
" on the device\n");
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
- struct qeth_card *card = CARD_FROM_CDEV(cdev);
-
if (card && (card->data.ccwdev == cdev)) {
card->data.state = CH_STATE_DOWN;
wake_up(&card->wait_q);
@@ -805,8 +759,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
default:
QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
dev_name(&cdev->dev), PTR_ERR(irb));
- QETH_DBF_TEXT(TRACE, 2, "ckirberr");
- QETH_DBF_TEXT(TRACE, 2, " rc???");
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT(card, 2, " rc???");
}
return PTR_ERR(irb);
}
@@ -822,8 +776,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct qeth_cmd_buffer *iob;
__u8 index;
- QETH_DBF_TEXT(TRACE, 5, "irq");
-
if (__qeth_check_irb_error(cdev, intparm, irb))
return;
cstat = irb->scsw.cmd.cstat;
@@ -833,15 +785,17 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (!card)
return;
+ QETH_CARD_TEXT(card, 5, "irq");
+
if (card->read.ccwdev == cdev) {
channel = &card->read;
- QETH_DBF_TEXT(TRACE, 5, "read");
+ QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
- QETH_DBF_TEXT(TRACE, 5, "write");
+ QETH_CARD_TEXT(card, 5, "write");
} else {
channel = &card->data;
- QETH_DBF_TEXT(TRACE, 5, "data");
+ QETH_CARD_TEXT(card, 5, "data");
}
atomic_set(&channel->irq_pending, 0);
@@ -857,12 +811,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
goto out;
if (intparm == QETH_CLEAR_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "clrchpar");
+ QETH_CARD_TEXT(card, 6, "clrchpar");
/* we don't have to handle this further */
intparm = 0;
}
if (intparm == QETH_HALT_CHANNEL_PARM) {
- QETH_DBF_TEXT(TRACE, 6, "hltchpar");
+ QETH_CARD_TEXT(card, 6, "hltchpar");
/* we don't have to handle this further */
intparm = 0;
}
@@ -963,7 +917,7 @@ void qeth_clear_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
+ QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i)
if (card->qdio.out_qs[i]) {
@@ -978,7 +932,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
int i = 0;
- QETH_DBF_TEXT(TRACE, 5, "freepool");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.init_pool.entry_list, init_list){
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
@@ -992,7 +945,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
{
int i, j;
- QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
@@ -1089,7 +1041,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
int rc = 0;
spin_lock_irqsave(&card->thread_mask_lock, flags);
- QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
+ QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
@@ -1102,7 +1054,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
{
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
- QETH_DBF_TEXT(TRACE , 2, "strthrd");
+ QETH_CARD_TEXT(card , 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
@@ -1229,8 +1181,8 @@ static int qeth_clear_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "clearch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1253,8 +1205,8 @@ static int qeth_halt_channel(struct qeth_channel *channel)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "haltch");
card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
@@ -1274,7 +1226,7 @@ static int qeth_halt_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "haltchs");
+ QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(&card->read);
rc2 = qeth_halt_channel(&card->write);
rc3 = qeth_halt_channel(&card->data);
@@ -1289,7 +1241,7 @@ static int qeth_clear_channels(struct qeth_card *card)
{
int rc1 = 0, rc2 = 0, rc3 = 0;
- QETH_DBF_TEXT(TRACE, 3, "clearchs");
+ QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(&card->read);
rc2 = qeth_clear_channel(&card->write);
rc3 = qeth_clear_channel(&card->data);
@@ -1304,8 +1256,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "clhacrd");
- QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 3, "clhacrd");
if (halt)
rc = qeth_halt_channels(card);
@@ -1318,7 +1269,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "qdioclr");
+ QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
@@ -1329,7 +1280,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
@@ -1340,7 +1291,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
- QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
+ QETH_CARD_TEXT_(card, 3, "2err%d", rc);
card->state = CARD_STATE_DOWN;
return rc;
}
@@ -1440,6 +1391,7 @@ static void qeth_init_func_level(struct qeth_card *card)
QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
break;
case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -1637,15 +1589,18 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
"host\n");
break;
case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&card->read.ccwdev->dev,
"Setting the device online failed because of "
- "insufficient LPAR authorization\n");
+ "insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
}
+ QETH_CARD_TEXT_(card, 2, "idxread%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
goto out;
}
@@ -1705,7 +1660,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "sendctl");
+ QETH_CARD_TEXT(card, 2, "sendctl");
reply = qeth_alloc_reply(card);
if (!reply) {
@@ -1732,7 +1687,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
event_timeout = QETH_TIMEOUT;
timeout = jiffies + event_timeout;
- QETH_DBF_TEXT(TRACE, 6, "noirqpnd");
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1741,7 +1696,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
dev_name(&card->write.ccwdev->dev), rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irqsave(&card->lock, flags);
list_del_init(&reply->list);
qeth_put_reply(reply);
@@ -1978,7 +1933,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
- QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2335,7 +2290,7 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
- QETH_DBF_TEXT(TRACE, 5, "inwrklst");
+ QETH_CARD_TEXT(card, 5, "inwrklst");
list_for_each_entry(entry,
&card->qdio.init_pool.entry_list, init_list) {
@@ -2522,7 +2477,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int rc;
char prot_type;
- QETH_DBF_TEXT(TRACE, 4, "sendipa");
+ QETH_CARD_TEXT(card, 4, "sendipa");
if (card->options.layer2)
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -2582,7 +2537,7 @@ int qeth_default_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
@@ -2597,7 +2552,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
+ QETH_CARD_TEXT(card, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
@@ -2633,7 +2588,7 @@ int qeth_query_setadapterparms(struct qeth_card *card)
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 3, "queryadp");
+ QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
@@ -2645,13 +2600,12 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
- QETH_DBF_TEXT(TRACE, 2, dbftext);
- QETH_DBF_TEXT(QERR, 2, dbftext);
- QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
+ QETH_CARD_TEXT(card, 2, dbftext);
+ QETH_CARD_TEXT_(card, 2, " F15=%02X",
buf->element[15].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
+ QETH_CARD_TEXT_(card, 2, " F14=%02X",
buf->element[14].flags & 0xff);
- QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
+ QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].flags & 0xff) == 0x12) {
card->stats.rx_dropped++;
return 0;
@@ -2717,8 +2671,7 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
if (rc) {
dev_warn(&card->gdev->dev,
"QDIO reported an error, rc=%i\n", rc);
- QETH_DBF_TEXT(TRACE, 2, "qinberr");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "qinberr");
}
queue->next_buf_to_init = (queue->next_buf_to_init + count) %
QDIO_MAX_BUFFERS_PER_Q;
@@ -2731,7 +2684,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
- QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
+ QETH_CARD_TEXT(card, 6, "hdsnderr");
if (card->info.type == QETH_CARD_TYPE_IQD) {
if (sbalf15 == 0) {
qdio_err = 0;
@@ -2747,9 +2700,8 @@ static int qeth_handle_send_error(struct qeth_card *card,
if ((sbalf15 >= 15) && (sbalf15 <= 31))
return QETH_SEND_ERROR_RETRY;
- QETH_DBF_TEXT(TRACE, 1, "lnkfail");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
+ QETH_CARD_TEXT(card, 1, "lnkfail");
+ QETH_CARD_TEXT_(card, 1, "%04x %02x",
(u16)qdio_err, (u8)sbalf15);
return QETH_SEND_ERROR_LINK_FAILURE;
}
@@ -2764,7 +2716,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
- QETH_DBF_TEXT(TRACE, 6, "np->pack");
+ QETH_CARD_TEXT(queue->card, 6, "np->pack");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_dp_p++;
queue->do_pack = 1;
@@ -2787,7 +2739,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
if (atomic_read(&queue->used_buffers)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
- QETH_DBF_TEXT(TRACE, 6, "pack->np");
+ QETH_CARD_TEXT(queue->card, 6, "pack->np");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
@@ -2896,9 +2848,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* ignore temporary SIGA errors without busy condition */
if (rc == QDIO_ERROR_SIGA_TARGET)
return;
- QETH_DBF_TEXT(TRACE, 2, "flushbuf");
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));
+ QETH_CARD_TEXT(queue->card, 2, "flushbuf");
+ QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
/* this must not happen under normal circumstances. if it
* happens something is really wrong -> recover */
@@ -2960,10 +2911,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
int i;
unsigned qeth_send_err;
- QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
+ QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 2, "achkcond");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "achkcond");
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
@@ -3033,13 +2983,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
struct sk_buff *skb, int elems)
{
- int elements_needed = 0;
+ int dlen = skb->len - skb->data_len;
+ int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+ PFN_DOWN((unsigned long)skb->data);
- if (skb_shinfo(skb)->nr_frags > 0)
- elements_needed = (skb_shinfo(skb)->nr_frags + 1);
- if (elements_needed == 0)
- elements_needed = 1 + (((((unsigned long) skb->data) %
- PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
+ elements_needed += skb_shinfo(skb)->nr_frags;
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
"(Number=%d / Length=%d). Discarded.\n",
@@ -3050,15 +2998,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+ int hroom, inpage, rest;
+
+ if (((unsigned long)skb->data & PAGE_MASK) !=
+ (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+ hroom = skb_headroom(skb);
+ inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+ rest = len - inpage;
+ if (rest > hroom)
+ return 1;
+ memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+ skb->data -= rest;
+ QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
- int length = skb->len;
+ int length = skb->len - skb->data_len;
int length_here;
int element;
char *data;
- int first_lap ;
+ int first_lap, cnt;
+ struct skb_frag_struct *frag;
element = *next_element_to_fill;
data = skb->data;
@@ -3081,10 +3049,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
length -= length_here;
if (!length) {
if (first_lap)
- buffer->element[element].flags = 0;
+ if (skb_shinfo(skb)->nr_frags)
+ buffer->element[element].flags =
+ SBAL_FLAGS_FIRST_FRAG;
+ else
+ buffer->element[element].flags = 0;
else
buffer->element[element].flags =
- SBAL_FLAGS_LAST_FRAG;
+ SBAL_FLAGS_MIDDLE_FRAG;
} else {
if (first_lap)
buffer->element[element].flags =
@@ -3097,6 +3069,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
element++;
first_lap = 0;
}
+
+ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+ frag = &skb_shinfo(skb)->frags[cnt];
+ buffer->element[element].addr = (char *)page_to_phys(frag->page)
+ + frag->page_offset;
+ buffer->element[element].length = frag->size;
+ buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+ element++;
+ }
+
+ if (buffer->element[element - 1].flags)
+ buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
*next_element_to_fill = element;
}
@@ -3137,20 +3121,16 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buf->next_element_to_fill++;
}
- if (skb_shinfo(skb)->nr_frags == 0)
- __qeth_fill_buffer(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill, offset);
- else
- __qeth_fill_buffer_frag(skb, buffer, large_send,
- (int *)&buf->next_element_to_fill);
+ __qeth_fill_buffer(skb, buffer, large_send,
+ (int *)&buf->next_element_to_fill, offset);
if (!queue->do_pack) {
- QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
/* set state to PRIMED -> will be flushed */
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
flush_cnt = 1;
} else {
- QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
+ QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
if (buf->next_element_to_fill >=
@@ -3210,7 +3190,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
rc = dev_queue_xmit(skb);
} else {
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(QERR, 2, "qrdrop");
+ QETH_CARD_TEXT(card, 2, "qrdrop");
}
}
return 0;
@@ -3312,14 +3292,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_ipacmd_setadpparms *setparms;
- QETH_DBF_TEXT(TRACE, 4, "prmadpcb");
+ QETH_CARD_TEXT(card, 4, "prmadpcb");
cmd = (struct qeth_ipa_cmd *) data;
setparms = &(cmd->data.setadapterparms);
qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
setparms->data.mode = SET_PROMISC_MODE_OFF;
}
card->info.promisc_mode = setparms->data.mode;
@@ -3333,7 +3313,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setprom");
+ QETH_CARD_TEXT(card, 4, "setprom");
if (((dev->flags & IFF_PROMISC) &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
@@ -3343,7 +3323,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
mode = SET_PROMISC_MODE_OFF;
if (dev->flags & IFF_PROMISC)
mode = SET_PROMISC_MODE_ON;
- QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);
+ QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3360,9 +3340,9 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "chgmtu");
+ QETH_CARD_TEXT(card, 4, "chgmtu");
sprintf(dbf_text, "%8x", new_mtu);
- QETH_DBF_TEXT(TRACE, 4, dbf_text);
+ QETH_CARD_TEXT(card, 4, dbf_text);
if (new_mtu < 64)
return -EINVAL;
@@ -3382,7 +3362,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev)
card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 5, "getstat");
+ QETH_CARD_TEXT(card, 5, "getstat");
return &card->stats;
}
@@ -3393,7 +3373,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmaccb");
+ QETH_CARD_TEXT(card, 4, "chgmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (!card->options.layer2 ||
@@ -3413,7 +3393,7 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "chgmac");
+ QETH_CARD_TEXT(card, 4, "chgmac");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -3435,7 +3415,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
struct qeth_set_access_ctrl *access_ctrl_req;
int rc;
- QETH_DBF_TEXT(TRACE, 4, "setaccb");
+ QETH_CARD_TEXT(card, 4, "setaccb");
cmd = (struct qeth_ipa_cmd *) data;
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
@@ -3533,7 +3513,7 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_set_access_ctrl *access_ctrl_req;
- QETH_DBF_TEXT(TRACE, 4, "setacctl");
+ QETH_CARD_TEXT(card, 4, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "setacctl");
QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -3555,7 +3535,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "setactlo");
+ QETH_CARD_TEXT(card, 4, "setactlo");
if ((card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) &&
@@ -3583,8 +3563,8 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
+ QETH_CARD_TEXT(card, 4, "txtimeo");
card->stats.tx_errors++;
qeth_schedule_recovery(card);
}
@@ -3663,7 +3643,7 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "sendsnmp");
+ QETH_CARD_TEXT(card, 4, "sendsnmp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -3688,7 +3668,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
unsigned char *data;
__u16 data_len;
- QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");
+ QETH_CARD_TEXT(card, 3, "snpcmdcb");
cmd = (struct qeth_ipa_cmd *) sdata;
data = (unsigned char *)((char *)cmd - reply->offset);
@@ -3696,13 +3676,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setadapterparms.hdr.return_code) {
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
@@ -3713,13 +3693,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
- QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
return 0;
}
- QETH_DBF_TEXT_(TRACE, 4, "snore%i",
+ QETH_CARD_TEXT_(card, 4, "snore%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "sseqn%i",
+ QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
if (cmd->data.setadapterparms.hdr.seq_no == 1) {
@@ -3733,9 +3713,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
}
qinfo->udata_offset += data_len;
/* check if all replies received ... */
- QETH_DBF_TEXT_(TRACE, 4, "srtot%i",
+ QETH_CARD_TEXT_(card, 4, "srtot%i",
cmd->data.setadapterparms.hdr.used_total);
- QETH_DBF_TEXT_(TRACE, 4, "srseq%i",
+ QETH_CARD_TEXT_(card, 4, "srseq%i",
cmd->data.setadapterparms.hdr.seq_no);
if (cmd->data.setadapterparms.hdr.seq_no <
cmd->data.setadapterparms.hdr.used_total)
@@ -3752,7 +3732,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
struct qeth_arp_query_info qinfo = {0, };
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "snmpcmd");
+ QETH_CARD_TEXT(card, 3, "snmpcmd");
if (card->info.guestlan)
return -EOPNOTSUPP;
@@ -3766,7 +3746,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
return -EFAULT;
ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
if (!ureq) {
- QETH_DBF_TEXT(TRACE, 2, "snmpnome");
+ QETH_CARD_TEXT(card, 2, "snmpnome");
return -ENOMEM;
}
if (copy_from_user(ureq, udata,
@@ -4120,13 +4100,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
skb_len -= data_len;
if (skb_len) {
if (qeth_is_last_sbale(element)) {
- QETH_DBF_TEXT(TRACE, 4, "unexeob");
- QETH_DBF_TEXT_(TRACE, 4, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_TEXT(QERR, 2, "unexeob");
- QETH_DBF_TEXT_(QERR, 2, "%s",
- CARD_BUS_ID(card));
- QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer));
+ QETH_CARD_TEXT(card, 4, "unexeob");
+ QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
card->stats.rx_errors++;
return NULL;
@@ -4147,8 +4122,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return skb;
no_mem:
if (net_ratelimit()) {
- QETH_DBF_TEXT(TRACE, 2, "noskbmem");
- QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
+ QETH_CARD_TEXT(card, 2, "noskbmem");
}
card->stats.rx_dropped++;
return NULL;
@@ -4164,17 +4138,17 @@ static void qeth_unregister_dbf_views(void)
}
}
-void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...)
+void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
char dbf_txt_buf[32];
va_list args;
- if (level > (qeth_dbf[dbf_nix].id)->level)
+ if (level > id->level)
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
- debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
+ debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
@@ -4282,6 +4256,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
struct device *dev;
int rc;
unsigned long flags;
+ char dbf_name[20];
QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -4297,6 +4272,17 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = -ENOMEM;
goto err_dev;
}
+
+ snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+ dev_name(&gdev->dev));
+ card->debug = debug_register(dbf_name, 2, 1, 8);
+ if (!card->debug) {
+ QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
+ rc = -ENOMEM;
+ goto err_card;
+ }
+ debug_register_view(card->debug, &debug_hex_ascii_view);
+
card->read.ccwdev = gdev->cdev[0];
card->write.ccwdev = gdev->cdev[1];
card->data.ccwdev = gdev->cdev[2];
@@ -4309,12 +4295,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
rc = qeth_determine_card_type(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto err_card;
+ goto err_dbf;
}
rc = qeth_setup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- goto err_card;
+ goto err_dbf;
}
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -4322,7 +4308,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
else
rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_dbf;
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
@@ -4352,6 +4338,8 @@ err_attr:
qeth_core_remove_osn_attributes(dev);
else
qeth_core_remove_device_attributes(dev);
+err_dbf:
+ debug_unregister(card->debug);
err_card:
qeth_core_free_card(card);
err_dev:
@@ -4375,6 +4363,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
} else {
qeth_core_remove_device_attributes(&gdev->dev);
}
+ debug_unregister(card->debug);
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_del(&card->list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
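For orientation, the QETH_CARD_TEXT*/QETH_CARD_HEX calls introduced throughout this file are presumably thin wrappers around the per-card debug area registered in qeth_core_probe_device() above. A minimal sketch of what such wrappers could look like, assuming card->debug holds the debug_info_t returned by debug_register() (the real definitions live in qeth_core.h and may differ):

/* Illustrative sketch only; not the patch's actual macro bodies. */
#define QETH_CARD_TEXT(card, level, text) \
	debug_text_event((card)->debug, level, text)
#define QETH_CARD_HEX(card, level, addr, len) \
	debug_event((card)->debug, level, (void *)(addr), len)
#define QETH_CARD_TEXT_(card, level, text...) \
	qeth_dbf_longtext((card)->debug, level, text)

The last wrapper lines up with the reworked qeth_dbf_longtext() above, which now takes the debug_info_t pointer directly instead of an index into the global qeth_dbf[] array.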
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f9ed24de7514..e37dd8c4bf4e 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -616,8 +616,9 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
-#define QETH_IDX_ACT_ERR_EXCL 0x19
-#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
+#define QETH_IDX_ACT_ERR_AUTH_USER 0x20
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d43f57a4ac66..32d07c2dcc67 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -79,7 +79,7 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -130,7 +130,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Sgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
/* MAC already registered, needed in couple/uncouple case */
@@ -147,7 +147,7 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Sgmac");
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
qeth_l2_send_setgroupmac_cb);
}
@@ -159,7 +159,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 *mac;
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmacb");
+ QETH_CARD_TEXT(card, 2, "L2Dgmacb");
cmd = (struct qeth_ipa_cmd *) data;
mac = &cmd->data.setdelmac.mac[0];
if (cmd->hdr.return_code)
@@ -170,7 +170,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Dgmac");
+ QETH_CARD_TEXT(card, 2, "L2Dgmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
qeth_l2_send_delgroupmac_cb);
}
@@ -262,15 +262,14 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2sdvcb");
+ QETH_CARD_TEXT(card, 2, "L2sdvcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
"Continuing\n", cmd->data.setdelvlan.vlan_id,
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command);
- QETH_DBF_TEXT_(TRACE, 2, "L2%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
return 0;
}
@@ -281,7 +280,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT_(TRACE, 4, "L2sdv%x", ipacmd);
+ QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
@@ -292,7 +291,7 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
{
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT(TRACE, 3, "L2prcvln");
+ QETH_CARD_TEXT(card, 3, "L2prcvln");
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
if (clear)
@@ -310,13 +309,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
- QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ QETH_CARD_TEXT(card, 3, "aidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "aidREC");
+ QETH_CARD_TEXT(card, 3, "aidREC");
return;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
@@ -334,13 +333,13 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ QETH_CARD_TEXT(card, 3, "kidOSM");
return;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_bh(&card->vlanlock);
@@ -456,7 +455,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
/* else unknown */
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -474,7 +473,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 2, "L2sdmac");
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
@@ -488,10 +487,10 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Smaccb");
+ QETH_CARD_TEXT(card, 2, "L2Smaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
switch (cmd->hdr.return_code) {
case IPA_RC_L2_DUP_MAC:
@@ -523,7 +522,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Setmac");
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
qeth_l2_send_setmac_cb);
}
@@ -534,10 +533,10 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "L2Dmaccb");
+ QETH_CARD_TEXT(card, 2, "L2Dmaccb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 2, "err%d", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
cmd->hdr.return_code = -EIO;
return 0;
}
@@ -548,7 +547,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
- QETH_DBF_TEXT(TRACE, 2, "L2Delmac");
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
@@ -594,23 +593,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
struct qeth_card *card = dev->ml_priv;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setmac");
+ QETH_CARD_TEXT(card, 3, "setmac");
if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
- QETH_DBF_TEXT(TRACE, 3, "setmcINV");
+ QETH_CARD_TEXT(card, 3, "setmcINV");
return -EOPNOTSUPP;
}
if (card->info.type == QETH_CARD_TYPE_OSN ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX) {
- QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
+ QETH_CARD_TEXT(card, 3, "setmcTYP");
return -EOPNOTSUPP;
}
- QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
- QETH_DBF_HEX(TRACE, 3, addr->sa_data, OSA_ADDR_LEN);
+ QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "setmcREC");
+ QETH_CARD_TEXT(card, 3, "setmcREC");
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
@@ -627,7 +625,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
if (card->info.type == QETH_CARD_TYPE_OSN)
return ;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -714,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ if (qeth_hdr_chk_and_bounce(new_skb,
+ sizeof(struct qeth_hdr_layer2)))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements, data_offset, hd_len);
if (!rc) {
@@ -771,11 +772,10 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -799,13 +799,13 @@ static int qeth_l2_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
if ((card->info.type != QETH_CARD_TYPE_OSN) &&
(!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) {
- QETH_DBF_TEXT(TRACE, 4, "nomacadr");
+ QETH_CARD_TEXT(card, 4, "nomacadr");
return -EPERM;
}
card->data.state = CH_STATE_UP;
@@ -822,7 +822,7 @@ static int qeth_l2_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -1074,11 +1074,10 @@ static int qeth_l2_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -1181,12 +1180,12 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 5, "osndctrd");
+ QETH_CARD_TEXT(card, 5, "osndctrd");
wait_event(card->wait_q,
atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
- QETH_DBF_TEXT(TRACE, 6, "osnoirqp");
+ QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
@@ -1194,7 +1193,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
- QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_release_buffer(iob->channel, iob);
atomic_set(&card->write.irq_pending, 0);
wake_up(&card->wait_q);
@@ -1207,7 +1206,7 @@ static int qeth_osn_send_ipa_cmd(struct qeth_card *card,
{
u16 s1, s2;
- QETH_DBF_TEXT(TRACE, 4, "osndipa");
+ QETH_CARD_TEXT(card, 4, "osndipa");
qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2);
s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len);
@@ -1225,12 +1224,12 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
struct qeth_card *card;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
if (!dev)
return -ENODEV;
card = dev->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnsdmc");
if ((card->state != CARD_STATE_UP) &&
(card->state != CARD_STATE_SOFTSETUP))
return -ENODEV;
@@ -1247,13 +1246,13 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osnreg");
*dev = qeth_l2_netdev_by_devno(read_dev_no);
if (*dev == NULL)
return -ENODEV;
card = (*dev)->ml_priv;
if (!card)
return -ENODEV;
+ QETH_CARD_TEXT(card, 2, "osnreg");
if ((assist_cb == NULL) || (data_cb == NULL))
return -EINVAL;
card->osn_info.assist_cb = assist_cb;
@@ -1266,12 +1265,12 @@ void qeth_osn_deregister(struct net_device *dev)
{
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 2, "osndereg");
if (!dev)
return;
card = dev->ml_priv;
if (!card)
return;
+ QETH_CARD_TEXT(card, 2, "osndereg");
card->osn_info.assist_cb = NULL;
card->osn_info.data_cb = NULL;
return;
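The qeth_l2_hard_start_xmit() hunk above adds a qeth_hdr_chk_and_bounce() call before handing the skb to qeth_do_send_packet(). The helper itself is not part of this file; presumably it verifies that the pushed hardware header fits into a single page and relocates it (or tells the caller to drop) otherwise, since a QDIO buffer element must not cross a page boundary. A hypothetical sketch of the check half of that logic, with the bounce step left out:

/* Hypothetical sketch; the real qeth_hdr_chk_and_bounce() is defined
 * elsewhere in the qeth core and presumably also copies the header
 * when this check fails. */
static inline int qeth_hdr_crosses_page(struct sk_buff *skb, int len)
{
	unsigned long start = (unsigned long)skb->data;

	/* true if the len header bytes at skb->data span two pages */
	return (start & PAGE_MASK) != ((start + len - 1) & PAGE_MASK);
}

The same page-boundary concern is consistent with the removal of qeth_l3_tso_check() in the layer-3 code below.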
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 61adae21a464..61d348e51920 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -287,7 +287,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
addr->users += add ? 1 : -1;
if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
qeth_l3_is_addr_covered_by_ipato(card, addr)) {
- QETH_DBF_TEXT(TRACE, 2, "tkovaddr");
+ QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
list_add_tail(&addr->entry, card->ip_tbd_list);
@@ -301,13 +301,13 @@ static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "delip");
+ QETH_CARD_TEXT(card, 4, "delip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 0);
@@ -320,12 +320,12 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 4, "addip");
+ QETH_CARD_TEXT(card, 4, "addip");
if (addr->proto == QETH_PROT_IPV4)
- QETH_DBF_HEX(TRACE, 4, &addr->u.a4.addr, 4);
+ QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
else {
- QETH_DBF_HEX(TRACE, 4, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
}
spin_lock_irqsave(&card->ip_lock, flags);
rc = __qeth_l3_insert_ip_todo(card, addr, 1);
@@ -353,10 +353,10 @@ static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
struct qeth_ipaddr *iptodo;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "delmc");
+ QETH_CARD_TEXT(card, 4, "delmc");
iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!iptodo) {
- QETH_DBF_TEXT(TRACE, 2, "dmcnomem");
+ QETH_CARD_TEXT(card, 2, "dmcnomem");
return;
}
iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
@@ -457,8 +457,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
unsigned long flags;
int rc;
- QETH_DBF_TEXT(TRACE, 2, "sdiplist");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "sdiplist");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (card->options.sniffer)
return;
@@ -466,7 +466,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(TRACE, 0, "silnomem");
+ QETH_CARD_TEXT(card, 0, "silnomem");
card->ip_tbd_list = tbd_list;
spin_unlock_irqrestore(&card->ip_lock, flags);
return;
@@ -517,7 +517,7 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
struct qeth_ipaddr *addr, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "clearip");
+ QETH_CARD_TEXT(card, 4, "clearip");
if (recover && card->options.sniffer)
return;
spin_lock_irqsave(&card->ip_lock, flags);
@@ -577,7 +577,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "setdelmc");
+ QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -615,8 +615,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
__u8 netmask[16];
- QETH_DBF_TEXT(TRACE, 4, "setdelip");
- QETH_DBF_TEXT_(TRACE, 4, "flags%02X", flags);
+ QETH_CARD_TEXT(card, 4, "setdelip");
+ QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -645,7 +645,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "setroutg");
+ QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
@@ -689,7 +689,7 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "setrtg4");
+ QETH_CARD_TEXT(card, 3, "setrtg4");
qeth_l3_correct_routing_type(card, &card->options.route4.type,
QETH_PROT_IPV4);
@@ -709,7 +709,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "setrtg6");
+ QETH_CARD_TEXT(card, 3, "setrtg6");
#ifdef CONFIG_QETH_IPV6
if (!qeth_is_supported(card, IPA_IPV6))
@@ -753,7 +753,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
unsigned long flags;
int rc = 0;
- QETH_DBF_TEXT(TRACE, 2, "addipato");
+ QETH_CARD_TEXT(card, 2, "addipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -778,7 +778,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *ipatoe, *tmp;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 2, "delipato");
+ QETH_CARD_TEXT(card, 2, "delipato");
spin_lock_irqsave(&card->ip_lock, flags);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -806,11 +806,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa4");
+ QETH_CARD_TEXT(card, 2, "addvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addvipa6");
+ QETH_CARD_TEXT(card, 2, "addvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -841,11 +841,11 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa4");
+ QETH_CARD_TEXT(card, 2, "delvipa4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "delvipa6");
+ QETH_CARD_TEXT(card, 2, "delvipa6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -870,11 +870,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -905,11 +905,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr = qeth_l3_get_addr_buffer(proto);
if (ipaddr) {
if (proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip4");
+ QETH_CARD_TEXT(card, 2, "addrxip4");
memcpy(&ipaddr->u.a4.addr, addr, 4);
ipaddr->u.a4.mask = 0;
} else if (proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "addrxip6");
+ QETH_CARD_TEXT(card, 2, "addrxip6");
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
@@ -929,15 +929,15 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int cnt = 3;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "setaddr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "setaddr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "setaddr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "setaddr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "setaddr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
do {
if (addr->is_multicast)
@@ -946,10 +946,10 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP,
addr->set_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
} while ((--cnt > 0) && rc);
if (rc) {
- QETH_DBF_TEXT(TRACE, 2, "FAILED");
+ QETH_CARD_TEXT(card, 2, "FAILED");
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
@@ -963,15 +963,15 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
int rc = 0;
if (addr->proto == QETH_PROT_IPV4) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr4");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a4.addr, sizeof(int));
+ QETH_CARD_TEXT(card, 2, "deladdr4");
+ QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
} else if (addr->proto == QETH_PROT_IPV6) {
- QETH_DBF_TEXT(TRACE, 2, "deladdr6");
- QETH_DBF_HEX(TRACE, 3, &addr->u.a6.addr, 8);
- QETH_DBF_HEX(TRACE, 3, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_TEXT(card, 2, "deladdr6");
+ QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8);
} else {
- QETH_DBF_TEXT(TRACE, 2, "deladdr?");
- QETH_DBF_HEX(TRACE, 3, addr, sizeof(struct qeth_ipaddr));
+ QETH_CARD_TEXT(card, 2, "deladdr?");
+ QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr));
}
if (addr->is_multicast)
rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM);
@@ -979,7 +979,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP,
addr->del_flags);
if (rc)
- QETH_DBF_TEXT(TRACE, 2, "failed");
+ QETH_CARD_TEXT(card, 2, "failed");
return rc;
}
@@ -1012,7 +1012,7 @@ static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "adpmode");
+ QETH_CARD_TEXT(card, 4, "adpmode");
iob = qeth_get_adapter_cmd(card, command,
sizeof(struct qeth_ipacmd_setadpparms));
@@ -1027,7 +1027,7 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 4, "adphstr");
+ QETH_CARD_TEXT(card, 4, "adphstr");
if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
rc = qeth_l3_send_setadp_mode(card,
@@ -1093,7 +1093,7 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "defadpcb");
+ QETH_CARD_TEXT(card, 4, "defadpcb");
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0) {
@@ -1106,13 +1106,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
+ QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
}
if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
card->info.tx_csum_mask =
cmd->data.setassparms.data.flags_32bit;
- QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
}
return 0;
@@ -1125,7 +1125,7 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "getasscm");
+ QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1147,7 +1147,7 @@ static int qeth_l3_send_setassparms(struct qeth_card *card,
int rc;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 4, "sendassp");
+ QETH_CARD_TEXT(card, 4, "sendassp");
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (len <= sizeof(__u32))
@@ -1166,7 +1166,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
int rc;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp6");
+ QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
@@ -1182,7 +1182,7 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
int length = 0;
struct qeth_cmd_buffer *iob;
- QETH_DBF_TEXT(TRACE, 4, "simassp4");
+ QETH_CARD_TEXT(card, 4, "simassp4");
if (data)
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
@@ -1196,7 +1196,7 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaarp");
+ QETH_CARD_TEXT(card, 3, "ipaarp");
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
@@ -1218,7 +1218,7 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "ipaipfrg");
+ QETH_CARD_TEXT(card, 3, "ipaipfrg");
if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) {
dev_info(&card->gdev->dev,
@@ -1243,7 +1243,7 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stsrcmac");
+ QETH_CARD_TEXT(card, 3, "stsrcmac");
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
@@ -1265,7 +1265,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtvlan");
+ QETH_CARD_TEXT(card, 3, "strtvlan");
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
@@ -1289,7 +1289,7 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stmcast");
+ QETH_CARD_TEXT(card, 3, "stmcast");
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
@@ -1349,7 +1349,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "softipv6");
+ QETH_CARD_TEXT(card, 3, "softipv6");
if (card->info.type == QETH_CARD_TYPE_IQD)
goto out;
@@ -1395,7 +1395,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtipv6");
+ QETH_CARD_TEXT(card, 3, "strtipv6");
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
@@ -1412,7 +1412,7 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "stbrdcst");
+ QETH_CARD_TEXT(card, 3, "stbrdcst");
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
@@ -1512,7 +1512,7 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
{
int rc = 0;
- QETH_DBF_TEXT(TRACE, 3, "strtcsum");
+ QETH_CARD_TEXT(card, 3, "strtcsum");
if (card->options.checksum_type == NO_CHECKSUMMING) {
dev_info(&card->gdev->dev,
@@ -1569,7 +1569,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
- QETH_DBF_TEXT(TRACE, 3, "sttso");
+ QETH_CARD_TEXT(card, 3, "sttso");
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
dev_info(&card->gdev->dev,
@@ -1596,7 +1596,7 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
- QETH_DBF_TEXT(TRACE, 3, "strtipas");
+ QETH_CARD_TEXT(card, 3, "strtipas");
qeth_set_access_ctrl_online(card); /* go on*/
qeth_l3_start_ipa_arp_processing(card); /* go on*/
@@ -1619,7 +1619,7 @@ static int qeth_l3_put_unique_id(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- QETH_DBF_TEXT(TRACE, 2, "puniqeid");
+ QETH_CARD_TEXT(card, 2, "puniqeid");
if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) ==
UNIQUE_ID_NOT_BY_CARD)
@@ -1723,7 +1723,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
cmd = (struct qeth_ipa_cmd *)data;
rc = cmd->hdr.return_code;
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ QETH_CARD_TEXT_(card, 2, "dxter%x", rc);
switch (cmd->data.diagass.action) {
case QETH_DIAGS_CMD_TRACE_QUERY:
break;
@@ -1800,7 +1800,7 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
struct ip_mc_list *im4;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc");
+ QETH_CARD_TEXT(card, 4, "addmc");
for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "addmcvl");
+ QETH_CARD_TEXT(card, 4, "addmcvl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1844,7 +1844,7 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
{
struct in_device *in4_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv4");
+ QETH_CARD_TEXT(card, 4, "chkmcv4");
in4_dev = in_dev_get(card->dev);
if (in4_dev == NULL)
return;
@@ -1862,7 +1862,7 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
struct ifmcaddr6 *im6;
char buf[MAX_ADDR_LEN];
- QETH_DBF_TEXT(TRACE, 4, "addmc6");
+ QETH_CARD_TEXT(card, 4, "addmc6");
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
@@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
struct vlan_group *vg;
int i;
- QETH_DBF_TEXT(TRACE, 4, "admc6vl");
+ QETH_CARD_TEXT(card, 4, "admc6vl");
if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
return;
@@ -1907,7 +1907,7 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
{
struct inet6_dev *in6_dev;
- QETH_DBF_TEXT(TRACE, 4, "chkmcv6");
+ QETH_CARD_TEXT(card, 4, "chkmcv6");
if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
@@ -1928,7 +1928,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
struct in_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr4");
+ QETH_CARD_TEXT(card, 4, "frvaddr4");
in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in_dev)
@@ -1954,7 +1954,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
- QETH_DBF_TEXT(TRACE, 4, "frvaddr6");
+ QETH_CARD_TEXT(card, 4, "frvaddr6");
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
@@ -1989,7 +1989,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT(TRACE, 4, "vlanreg");
+ QETH_CARD_TEXT(card, 4, "vlanreg");
spin_lock_irqsave(&card->vlanlock, flags);
card->vlangrp = grp;
spin_unlock_irqrestore(&card->vlanlock, flags);
@@ -2005,9 +2005,9 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
- QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
- QETH_DBF_TEXT(TRACE, 3, "kidREC");
+ QETH_CARD_TEXT(card, 3, "kidREC");
return;
}
spin_lock_irqsave(&card->vlanlock, flags);
@@ -2162,7 +2162,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
break;
default:
dev_kfree_skb_any(skb);
- QETH_DBF_TEXT(TRACE, 3, "inbunkno");
+ QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
continue;
}
@@ -2229,7 +2229,8 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
card = vlan_dev_real_dev(dev)->ml_priv;
if (card && card->options.layer2)
card = NULL;
- QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
+ if (card)
+ QETH_CARD_TEXT_(card, 4, "%d", rc);
return card ;
}
@@ -2307,10 +2308,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
} else if (card->options.sniffer && /* HiperSockets trace */
qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
if (dev->flags & IFF_PROMISC) {
- QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ QETH_CARD_TEXT(card, 3, "+promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
} else {
- QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ QETH_CARD_TEXT(card, 3, "-promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
}
}
@@ -2320,7 +2321,7 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 3, "setmulti");
+ QETH_CARD_TEXT(card, 3, "setmulti");
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
@@ -2365,7 +2366,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpstnoe");
+ QETH_CARD_TEXT(card, 3, "arpstnoe");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2417,17 +2418,17 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
int uentry_size;
int i;
- QETH_DBF_TEXT(TRACE, 4, "arpquecb");
+ QETH_CARD_TEXT(card, 4, "arpquecb");
qinfo = (struct qeth_arp_query_info *) reply->param;
cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_TEXT_(TRACE, 4, "qaer1%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
return 0;
}
if (cmd->data.setassparms.hdr.return_code) {
cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- QETH_DBF_TEXT_(TRACE, 4, "qaer2%i", cmd->hdr.return_code);
+ QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
return 0;
}
qdata = &cmd->data.setassparms.data.query_arp;
@@ -2449,14 +2450,14 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) <
qdata->no_entries * uentry_size){
- QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM);
+ QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
cmd->hdr.return_code = -ENOMEM;
goto out_error;
}
- QETH_DBF_TEXT_(TRACE, 4, "anore%i",
+ QETH_CARD_TEXT_(card, 4, "anore%i",
cmd->data.setassparms.hdr.number_of_replies);
- QETH_DBF_TEXT_(TRACE, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
- QETH_DBF_TEXT_(TRACE, 4, "anoen%i", qdata->no_entries);
+ QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
+ QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
/* strip off "media specific information" */
@@ -2492,7 +2493,7 @@ static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
unsigned long),
void *reply_param)
{
- QETH_DBF_TEXT(TRACE, 4, "sendarp");
+ QETH_CARD_TEXT(card, 4, "sendarp");
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
@@ -2508,7 +2509,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpquery");
+ QETH_CARD_TEXT(card, 3, "arpquery");
if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
IPA_ARP_PROCESSING)) {
@@ -2551,7 +2552,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arpadent");
+ QETH_CARD_TEXT(card, 3, "arpadent");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2590,7 +2591,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
int tmp;
int rc;
- QETH_DBF_TEXT(TRACE, 3, "arprment");
+ QETH_CARD_TEXT(card, 3, "arprment");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2626,7 +2627,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
int rc;
int tmp;
- QETH_DBF_TEXT(TRACE, 3, "arpflush");
+ QETH_CARD_TEXT(card, 3, "arpflush");
/*
* currently GuestLAN only supports the ARP assist function
@@ -2734,7 +2735,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EOPNOTSUPP;
}
if (rc)
- QETH_DBF_TEXT_(TRACE, 2, "ioce%d", rc);
+ QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
return rc;
}
@@ -2903,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
tcp_hdr(skb)->doff * 4;
int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
- int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+ int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
elements += skb_shinfo(skb)->nr_frags;
return elements;
}
-static inline int qeth_l3_tso_check(struct sk_buff *skb)
-{
- int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
- (unsigned long)skb->data;
- return (((unsigned long)skb->data & PAGE_MASK) !=
- (((unsigned long)skb->data + len) & PAGE_MASK));
-}
-
static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
int rc;
@@ -3015,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
(cast_type == RTN_UNSPEC)) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr_tso));
- if (qeth_l3_tso_check(new_skb))
- QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
@@ -3047,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
elements_needed += elems;
nr_frags = skb_shinfo(new_skb)->nr_frags;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (card->info.type != QETH_CARD_TYPE_IQD) {
+ int len;
+ if (large_send == QETH_LARGE_SEND_TSO)
+ len = ((unsigned long)tcp_hdr(new_skb) +
+ tcp_hdr(new_skb)->doff * 4) -
+ (unsigned long)new_skb->data;
+ else
+ len = sizeof(struct qeth_hdr_layer3);
+
+ if (qeth_hdr_chk_and_bounce(new_skb, len))
+ goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed);
- else
+ } else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, data_offset, 0);
@@ -3103,7 +3104,7 @@ static int qeth_l3_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethopen");
+ QETH_CARD_TEXT(card, 4, "qethopen");
if (card->state != CARD_STATE_SOFTSETUP)
return -ENODEV;
card->data.state = CH_STATE_UP;
@@ -3119,7 +3120,7 @@ static int qeth_l3_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- QETH_DBF_TEXT(TRACE, 4, "qethstop");
+ QETH_CARD_TEXT(card, 4, "qethstop");
netif_tx_disable(dev);
if (card->state == CARD_STATE_UP)
card->state = CARD_STATE_SOFTSETUP;
@@ -3312,11 +3313,10 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
- QETH_DBF_TEXT(TRACE, 1, "qdinchk");
- QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
- QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
+ QETH_CARD_TEXT(card, 1, "qdinchk");
+ QETH_CARD_TEXT_(card, 1, "%04X%04X",
first_element, count);
- QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
+ QETH_CARD_TEXT_(card, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
@@ -3522,11 +3522,11 @@ static int qeth_l3_recover(void *ptr)
int rc = 0;
card = (struct qeth_card *) ptr;
- QETH_DBF_TEXT(TRACE, 2, "recover1");
- QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ QETH_CARD_TEXT(card, 2, "recover1");
+ QETH_CARD_HEX(card, 2, &card, sizeof(void *));
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
- QETH_DBF_TEXT(TRACE, 2, "recover2");
+ QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
card->use_hard_stop = 1;
@@ -3624,8 +3624,8 @@ static int qeth_l3_ip_event(struct notifier_block *this,
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
- QETH_DBF_TEXT(TRACE, 3, "ipevent");
card = qeth_l3_get_card_from_dev(dev);
+ QETH_CARD_TEXT(card, 3, "ipevent");
if (!card)
return NOTIFY_DONE;
@@ -3671,11 +3671,11 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
struct qeth_ipaddr *addr;
struct qeth_card *card;
- QETH_DBF_TEXT(TRACE, 3, "ip6event");
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
+ QETH_CARD_TEXT(card, 3, "ip6event");
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
@@ -3714,7 +3714,7 @@ static int qeth_l3_register_notifiers(void)
{
int rc;
- QETH_DBF_TEXT(TRACE, 5, "regnotif");
+ QETH_DBF_TEXT(SETUP, 5, "regnotif");
rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
if (rc)
return rc;
@@ -3733,7 +3733,7 @@ static int qeth_l3_register_notifiers(void)
static void qeth_l3_unregister_notifiers(void)
{
- QETH_DBF_TEXT(TRACE, 5, "unregnot");
+ QETH_DBF_TEXT(SETUP, 5, "unregnot");
BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
#ifdef CONFIG_QETH_IPV6
BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
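As a reading aid for the qeth_l3_tso_elements() hunk above: the two page-frame helpers it uses are the generic ones from <linux/pfn.h>, and the expression estimates the number of page frames spanned by the TCP header plus the linear TCP data that follows it, with the fragment count added on top.

/* Definitions from include/linux/pfn.h, reproduced here for reference. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)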
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 70491274da16..65e1cf104943 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -47,6 +47,7 @@ static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
+static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
@@ -142,8 +143,10 @@ static int smsg_pm_freeze(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_freeze\n");
#endif
- if (smsg_path)
+ if (smsg_path && iucv_path_connected) {
iucv_path_sever(smsg_path, NULL);
+ iucv_path_connected = 0;
+ }
return 0;
}
@@ -154,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_restore_thaw\n");
#endif
- if (smsg_path) {
+ if (smsg_path && iucv_path_connected) {
memset(smsg_path, 0, sizeof(*smsg_path));
smsg_path->msglim = 255;
smsg_path->flags = 0;
@@ -165,6 +168,8 @@ static int smsg_pm_restore_thaw(struct device *dev)
printk(KERN_ERR
"iucv_path_connect returned with rc %i\n", rc);
#endif
+ if (!rc)
+ iucv_path_connected = 1;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
}
return 0;
@@ -214,6 +219,8 @@ static int __init smsg_init(void)
NULL, NULL, NULL);
if (rc)
goto out_free_path;
+ else
+ iucv_path_connected = 1;
smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!smsg_dev) {
rc = -ENOMEM;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a175be9c496f..3b6a06eebf7f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1587,7 +1587,7 @@ cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
if (!err)
- return (&rt->u.dst)->dev;
+ return (&rt->dst)->dev;
return NULL;
}
@@ -1649,7 +1649,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
c3cn->saddr.sin_addr.s_addr = rt->rt_src;
/* now commit destination to connection */
- c3cn->dst_cache = &rt->u.dst;
+ c3cn->dst_cache = &rt->dst;
/* try to establish an offloaded connection */
dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 59ae76bace14..7c031fdc8205 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -209,6 +209,24 @@ static void chipco_powercontrol_init(struct ssb_chipcommon *cc)
}
}
+/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
+static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
+{
+ struct ssb_bus *bus = cc->dev->bus;
+
+ switch (bus->chip_id) {
+ case 0x4312:
+ case 0x4322:
+ case 0x4328:
+ return 7000;
+ case 0x4325:
+ /* TODO: */
+ default:
+ return 15000;
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
{
struct ssb_bus *bus = cc->dev->bus;
@@ -218,6 +236,12 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
if (bus->bustype != SSB_BUSTYPE_PCI)
return;
+
+ if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
+ cc->fast_pwrup_delay = pmu_fast_powerup_delay(cc);
+ return;
+ }
+
if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
return;
@@ -235,6 +259,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
return; /* We don't have a ChipCommon */
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
+ ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 3d551245a4e2..5732bb2c3578 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -502,9 +502,9 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
}
+/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
void ssb_pmu_init(struct ssb_chipcommon *cc)
{
- struct ssb_bus *bus = cc->dev->bus;
u32 pmucap;
if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
@@ -516,15 +516,12 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
- if (cc->pmu.rev >= 1) {
- if ((bus->chip_id == 0x4325) && (bus->chip_rev < 2)) {
- chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
- ~SSB_CHIPCO_PMU_CTL_NOILPONW);
- } else {
- chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
- SSB_CHIPCO_PMU_CTL_NOILPONW);
- }
- }
+ if (cc->pmu.rev == 1)
+ chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
+ ~SSB_CHIPCO_PMU_CTL_NOILPONW);
+ else
+ chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
+ SSB_CHIPCO_PMU_CTL_NOILPONW);
ssb_pmu_pll_init(cc);
ssb_pmu_resources_init(cc);
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 51275aac5b34..7cee7f4eb60b 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -486,6 +486,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
#ifdef CONFIG_SSB_PCIHOST
sdev->irq = bus->host_pci->irq;
dev->parent = &bus->host_pci->dev;
+ sdev->dma_dev = dev->parent;
#endif
break;
case SSB_BUSTYPE_PCMCIA:
@@ -501,6 +502,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
break;
case SSB_BUSTYPE_SSB:
dev->dma_mask = &dev->coherent_dma_mask;
+ sdev->dma_dev = dev;
break;
}
@@ -1226,80 +1228,6 @@ u32 ssb_dma_translation(struct ssb_device *dev)
}
EXPORT_SYMBOL(ssb_dma_translation);
-int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
-{
-#ifdef CONFIG_SSB_PCIHOST
- int err;
-#endif
-
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- err = pci_set_dma_mask(dev->bus->host_pci, mask);
- if (err)
- return err;
- err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
- return err;
-#endif
- case SSB_BUSTYPE_SSB:
- return dma_set_mask(dev->dev, mask);
- default:
- __ssb_dma_not_implemented(dev);
- }
- return -ENOSYS;
-}
-EXPORT_SYMBOL(ssb_dma_set_mask);
-
-void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp_flags)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- if (gfp_flags & GFP_DMA) {
- /* Workaround: The PCI API does not support passing
- * a GFP flag. */
- return dma_alloc_coherent(&dev->bus->host_pci->dev,
- size, dma_handle, gfp_flags);
- }
- return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
-#endif
- case SSB_BUSTYPE_SSB:
- return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
- default:
- __ssb_dma_not_implemented(dev);
- }
- return NULL;
-}
-EXPORT_SYMBOL(ssb_dma_alloc_consistent);
-
-void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- gfp_t gfp_flags)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- if (gfp_flags & GFP_DMA) {
- /* Workaround: The PCI API does not support passing
- * a GFP flag. */
- dma_free_coherent(&dev->bus->host_pci->dev,
- size, vaddr, dma_handle);
- return;
- }
- pci_free_consistent(dev->bus->host_pci, size,
- vaddr, dma_handle);
- return;
-#endif
- case SSB_BUSTYPE_SSB:
- dma_free_coherent(dev->dev, size, vaddr, dma_handle);
- return;
- default:
- __ssb_dma_not_implemented(dev);
- }
-}
-EXPORT_SYMBOL(ssb_dma_free_consistent);
-
int ssb_bus_may_powerdown(struct ssb_bus *bus)
{
struct ssb_chipcommon *cc;
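With the ssb_dma_* wrappers removed above, SSB drivers are expected to go through the generic DMA API on the dma_dev pointer that ssb_devices_register() now fills in. A minimal sketch under that assumption (the function name and buffer size are placeholders, not from the patch):

/* Sketch: allocate a coherent DMA buffer for an SSB device after the
 * wrapper removal.  sdev->dma_dev is the struct device set up in
 * ssb_devices_register() above. */
static void *example_alloc_ring(struct ssb_device *sdev, dma_addr_t *handle)
{
	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(32)))
		return NULL;
	return dma_alloc_coherent(sdev->dma_dev, PAGE_SIZE, handle, GFP_KERNEL);
}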
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 6dcda86be6eb..6e88d2b603b4 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
return -ENODEV;
}
if (bus->chipco.dev) { /* can be unavailable! */
- bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
- SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+ /*
+ * Get the SPROM offset: SSB_SPROM_BASE1, except when the chipcommon
+ * rev is >= 31, or when the chip ID is 0x4312 and
+ * (chipcommon status & 3) == 2, in which case use SSB_SPROM_BASE31.
+ */
+ if (bus->chipco.dev->id.revision >= 31)
+ bus->sprom_offset = SSB_SPROM_BASE31;
+ else if (bus->chip_id == 0x4312 &&
+ (bus->chipco.status & 0x03) == 2)
+ bus->sprom_offset = SSB_SPROM_BASE31;
+ else
+ bus->sprom_offset = SSB_SPROM_BASE1;
} else {
bus->sprom_offset = SSB_SPROM_BASE1;
}
+ ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 7a582e80de18..5ede9c255094 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -71,7 +71,7 @@ static int is_valid_iface(struct net_device *net_dev)
#endif
/* Device is being bridged */
- /* if (net_dev->br_port != NULL)
+ /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
return 0; */
return 1;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 5c0d06c79a81..fb69b01c8f3a 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -171,7 +171,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
int i, count;
rndis_query_cmplt_type *resp;
struct net_device *net;
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
if (!r) return -ENOMEM;
resp = (rndis_query_cmplt_type *) r->buf;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 0f41c9195e9b..4c218e910635 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -519,13 +519,12 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
/* start polling new socket */
oldsock = vq->private_data;
- if (sock == oldsock)
- goto done;
+ if (sock != oldsock) {
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, sock);
+ vhost_net_enable_vq(n, vq);
+ }
- vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, sock);
- vhost_net_enable_vq(n, vq);
-done:
if (oldsock) {
vhost_net_flush_vq(n, index);
fput(oldsock->file);
@@ -626,7 +625,7 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
}
#endif
-const static struct file_operations vhost_net_fops = {
+static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
.unlocked_ioctl = vhost_net_ioctl,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3b83382e06eb..04344b711c56 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -237,8 +237,8 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
{
int i;
- if (!mem)
- return 0;
+ if (!mem)
+ return 0;
for (i = 0; i < mem->nregions; ++i) {
struct vhost_memory_region *m = mem->regions + i;
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
index 2a61eb1beb85..d9cb19b7cff7 100644
--- a/include/linux/caif/caif_socket.h
+++ b/include/linux/caif/caif_socket.h
@@ -62,6 +62,7 @@ enum caif_channel_priority {
* @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
* @CAIFPROTO_UTIL: Utility (Psock) channel.
* @CAIFPROTO_RFM: Remote File Manager
+ * @CAIFPROTO_DEBUG: Debug link
*
* This enum defines the CAIF Channel type to be used. This defines
* the service to connect to on the modem.
@@ -72,6 +73,7 @@ enum caif_protocol_type {
CAIFPROTO_DATAGRAM_LOOP,
CAIFPROTO_UTIL,
CAIFPROTO_RFM,
+ CAIFPROTO_DEBUG,
_CAIFPROTO_MAX
};
#define CAIFPROTO_MAX _CAIFPROTO_MAX
@@ -83,6 +85,28 @@ enum caif_protocol_type {
enum caif_at_type {
CAIF_ATTYPE_PLAIN = 2
};
+/**
+ * enum caif_debug_type - Content selection for debug connection
+ * @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain
+ * both trace and interactive debug.
+ * @CAIF_DEBUG_TRACE: Connection contains trace only.
+ * @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug.
+ */
+enum caif_debug_type {
+ CAIF_DEBUG_TRACE_INTERACTIVE = 0,
+ CAIF_DEBUG_TRACE,
+ CAIF_DEBUG_INTERACTIVE,
+};
+
+/**
+ * enum caif_debug_service - Debug Service Endpoint
+ * @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system
+ * @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system
+ */
+enum caif_debug_service {
+ CAIF_RADIO_DEBUG_SERVICE = 1,
+ CAIF_APP_DEBUG_SERVICE
+};
/**
* struct sockaddr_caif - the sockaddr structure for CAIF sockets.
@@ -109,6 +133,12 @@ enum caif_at_type {
*
* @u.rfm.volume: Volume to mount.
*
+ * @u.dbg: Applies when family = CAIFPROTO_DEBUG.
+ *
+ * @u.dbg.type: Type of debug connection to set up
+ * (caif_debug_type).
+ *
+ * @u.dbg.service: Service sub-system to connect (caif_debug_service).
* Description:
* This structure holds the connect parameters used for setting up a
* CAIF Channel. It defines the service to connect to on the modem.
@@ -130,6 +160,10 @@ struct sockaddr_caif {
__u32 connection_id;
char volume[16];
} rfm; /* CAIFPROTO_RFM */
+ struct {
+ __u8 type; /* type:enum caif_debug_type */
+ __u8 service; /* service:caif_debug_service */
+ } dbg; /* CAIFPROTO_DEBUG */
} u;
};
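For illustration, a hypothetical userspace sketch of how the new debug address family could be used. AF_CAIF, the family field of sockaddr_caif and SOCK_SEQPACKET are assumed to be available from the surrounding headers; the helper name and the chosen type/service values are invented, not part of this patch.

/* Hypothetical example: open a trace-only CAIF debug channel to the
 * radio sub-system using the additions above. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/caif/caif_socket.h>

int open_caif_debug_channel(void)
{
	struct sockaddr_caif addr;
	int fd;

	fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DEBUG);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.dbg.type = CAIF_DEBUG_TRACE;		/* trace only */
	addr.u.dbg.service = CAIF_RADIO_DEBUG_SERVICE;	/* radio sub-system */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}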
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 276b40a16835..2c8af093d8b3 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -586,29 +586,29 @@ struct ethtool_ops {
#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */
#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */
#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */
-#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
-#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */
#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */
-#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
-#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */
#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
-#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
-#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
-#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
-#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
* (ethtool_value) */
#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
* (ethtool_value). */
#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */
#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
-#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
@@ -619,8 +619,8 @@ struct ethtool_ops {
#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */
#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */
-#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
-#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
+#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
+#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
@@ -645,18 +645,18 @@ struct ethtool_ops {
/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
-#define SUPPORTED_100baseT_Half (1 << 2)
-#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_100baseT_Half (1 << 2)
+#define SUPPORTED_100baseT_Full (1 << 3)
#define SUPPORTED_1000baseT_Half (1 << 4)
#define SUPPORTED_1000baseT_Full (1 << 5)
#define SUPPORTED_Autoneg (1 << 6)
#define SUPPORTED_TP (1 << 7)
#define SUPPORTED_AUI (1 << 8)
#define SUPPORTED_MII (1 << 9)
-#define SUPPORTED_FIBRE (1 << 10)
+#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_BNC (1 << 11)
#define SUPPORTED_10000baseT_Full (1 << 12)
-#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
#define SUPPORTED_2500baseX_Full (1 << 15)
#define SUPPORTED_Backplane (1 << 16)
@@ -666,8 +666,8 @@ struct ethtool_ops {
#define SUPPORTED_10000baseR_FEC (1 << 20)
/* Indicates what features are advertised by the interface. */
-#define ADVERTISED_10baseT_Half (1 << 0)
-#define ADVERTISED_10baseT_Full (1 << 1)
+#define ADVERTISED_10baseT_Half (1 << 0)
+#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
#define ADVERTISED_100baseT_Full (1 << 3)
#define ADVERTISED_1000baseT_Half (1 << 4)
@@ -706,12 +706,12 @@ struct ethtool_ops {
#define DUPLEX_FULL 0x01
/* Which connector port. */
-#define PORT_TP 0x00
+#define PORT_TP 0x00
#define PORT_AUI 0x01
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
-#define PORT_DA 0x05
+#define PORT_DA 0x05
#define PORT_NONE 0xef
#define PORT_OTHER 0xff
@@ -725,7 +725,7 @@ struct ethtool_ops {
/* Enable or disable autonegotiation. If this is set to enable,
* the forced link modes above are completely ignored.
*/
-#define AUTONEG_DISABLE 0x00
+#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
/* Mode MDI or MDI-X */
diff --git a/include/linux/if.h b/include/linux/if.h
index be350e62a905..53558ec59e1b 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -73,6 +73,8 @@
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
+#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
+#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
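For illustration, a hypothetical helper (not part of this patch) showing how callers are expected to test the new priv_flags bits instead of the br_port/macvlan_port pointers removed elsewhere in this series:

#include <linux/netdevice.h>
#include <linux/if.h>

/* Sketch only: true if dev is currently enslaved as a bridge or
 * macvlan port, based on the new priv_flags bits. */
static inline bool netif_is_bridge_or_macvlan_port(const struct net_device *dev)
{
	return dev->priv_flags & (IFF_BRIDGE_PORT | IFF_MACVLAN_PORT);
}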
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h
index cd525fae3c98..2c7994372bde 100644
--- a/include/linux/if_bonding.h
+++ b/include/linux/if_bonding.h
@@ -83,6 +83,7 @@
#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */
+#define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */
/* hashing types */
#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */
#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 938b7e81df95..0d241a5c4909 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -102,8 +102,6 @@ struct __fdb_entry {
#include <linux/netdevice.h>
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
- struct sk_buff *skb);
extern int (*br_should_route_hook)(struct sk_buff *skb);
#endif
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index bed7a4682b90..c831467774d0 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -119,7 +119,7 @@ struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
-} __attribute__((packed));
+} __packed;
#ifdef __KERNEL__
#include <linux/skbuff.h>
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index 5459c5c09930..9947c39e62f6 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
__u8 dsap; /* destination service access point */
__u8 ssap; /* source service access point */
__u8 ctrl; /* control byte #1 */
-} __attribute__ ((packed));
+} __packed;
/* Define 802.2 Type 2 header */
struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
__u8 ssap; /* source service access point */
__u8 ctrl_1; /* control byte #1 */
__u8 ctrl_2; /* control byte #2 */
-} __attribute__ ((packed));
+} __packed;
/* Define 802.2 SNAP header */
#define FDDI_K_OUI_LEN 3
@@ -85,7 +85,7 @@ struct fddi_snap_hdr {
__u8 ctrl; /* always 0x03 */
__u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
__be16 ethertype; /* packet type ID field */
-} __attribute__ ((packed));
+} __packed;
/* Define FDDI LLC frame header */
struct fddihdr {
@@ -98,7 +98,7 @@ struct fddihdr {
struct fddi_8022_2_hdr llc_8022_2;
struct fddi_snap_hdr llc_snap;
} hdr;
-} __attribute__ ((packed));
+} __packed;
#ifdef __KERNEL__
#include <linux/netdevice.h>
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 80b3a1056a5f..191ee0869bc1 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -135,7 +135,7 @@ struct frhdr
__be16 PID;
#define IP_NLPID pad
-} __attribute__((packed));
+} __packed;
/* see RFC 1490 for the definition of the following */
#define FRAD_I_UI 0x03
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h
index 8d038eb8db5c..5fe5f307c6f5 100644
--- a/include/linux/if_hippi.h
+++ b/include/linux/if_hippi.h
@@ -104,7 +104,7 @@ struct hippi_fp_hdr {
__be32 fixed;
#endif
__be32 d2_size;
-} __attribute__ ((packed));
+} __packed;
struct hippi_le_hdr {
#if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@ struct hippi_le_hdr {
__u8 daddr[HIPPI_ALEN];
__u16 locally_administered;
__u8 saddr[HIPPI_ALEN];
-} __attribute__ ((packed));
+} __packed;
#define HIPPI_OUI_LEN 3
/*
@@ -142,12 +142,12 @@ struct hippi_snap_hdr {
__u8 ctrl; /* always 0x03 */
__u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/
__be16 ethertype; /* packet type ID field */
-} __attribute__ ((packed));
+} __packed;
struct hippi_hdr {
struct hippi_fp_hdr fp;
struct hippi_le_hdr le;
struct hippi_snap_hdr snap;
-} __attribute__ ((packed));
+} __packed;
#endif /* _LINUX_IF_HIPPI_H */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 85c812db5a3f..7fcad2e1be3d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
-/* The struct should be in sync with struct net_device_stats */
+/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
__u32 rx_packets; /* total packets received */
__u32 tx_packets; /* total packets transmitted */
@@ -37,6 +37,7 @@ struct rtnl_link_stats {
__u32 tx_compressed;
};
+/* The main device statistics structure */
struct rtnl_link_stats64 {
__u64 rx_packets; /* total packets received */
__u64 tx_packets; /* total packets transmitted */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 9ea047aca795..c26a0e4f0ce8 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -84,8 +84,4 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops);
extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-
-extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *,
- struct sk_buff *);
-
#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 6ac23ef1801a..72bfa5a034dd 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -48,6 +48,7 @@ struct sockaddr_ll {
#define PACKET_LOSS 14
#define PACKET_VNET_HDR 15
#define PACKET_TX_TIMESTAMP 16
+#define PACKET_TIMESTAMP 17
struct tpacket_stats {
unsigned int tp_packets;
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index a6577af0c4e6..1925e0c3f162 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -59,7 +59,7 @@ struct sockaddr_pppox {
union{
struct pppoe_addr pppoe;
}sa_addr;
-}__attribute__ ((packed));
+} __packed;
/* The use of the above union isn't viable because the size of this
* struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tp_addr pppol2tp;
-}__attribute__ ((packed));
+} __packed;
/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
* bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tpv3_addr pppol2tp;
-} __attribute__ ((packed));
+} __packed;
/*********************************************************************
*
@@ -129,7 +129,7 @@ struct pppoe_hdr {
__be16 sid;
__be16 length;
struct pppoe_tag tag[0];
-} __attribute__ ((packed));
+} __packed;
/* Length of entire PPPoE + PPP header */
#define PPPOE_SES_HLEN 8
diff --git a/include/linux/in.h b/include/linux/in.h
index 583c76f9c30f..41d88a4689af 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -85,6 +85,7 @@ struct in_addr {
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
#define IP_MINTTL 21
+#define IP_NODEFRAG 22
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 99e1ab7e3eec..940e21595351 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr {
/*
* TLV encoded option data follows.
*/
-} __attribute__ ((packed)); /* required for some archs */
+} __packed; /* required for some archs */
#define ipv6_destopt_hdr ipv6_opt_hdr
#define ipv6_hopopt_hdr ipv6_opt_hdr
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao {
__u8 type;
__u8 length;
struct in6_addr addr;
-} __attribute__ ((__packed__));
+} __packed;
/*
* IPv6 fixed header
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index b9b5a684ed69..b8c23f88dd54 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -317,7 +317,7 @@ typedef struct T30_s {
__u8 r_scantime;
__u8 r_id[FAXIDLEN];
__u8 r_code;
-} __attribute__((packed)) T30_s;
+} __packed T30_s;
#define ISDN_TTY_FAX_CONN_IN 0
#define ISDN_TTY_FAX_CONN_OUT 1
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 78c3bed1c3f5..b5e7f2202484 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -251,7 +251,7 @@
struct mISDNhead {
unsigned int prim;
unsigned int id;
-} __attribute__((packed));
+} __packed;
#define MISDN_HEADER_LEN sizeof(struct mISDNhead)
#define MAX_DATA_SIZE 2048
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 155719dab813..bb58854a8061 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -88,7 +88,7 @@ struct nbd_request {
char handle[8];
__be64 from;
__be32 len;
-} __attribute__ ((packed));
+} __packed;
/*
* This is the reply packet that nbd-server sends back to the client after
diff --git a/include/linux/ncp.h b/include/linux/ncp.h
index 99f0adeeb3f3..3ace8370e61e 100644
--- a/include/linux/ncp.h
+++ b/include/linux/ncp.h
@@ -27,7 +27,7 @@ struct ncp_request_header {
__u8 conn_high;
__u8 function;
__u8 data[0];
-} __attribute__((packed));
+} __packed;
#define NCP_REPLY (0x3333)
#define NCP_WATCHDOG (0x3E3E)
@@ -42,7 +42,7 @@ struct ncp_reply_header {
__u8 completion_code;
__u8 connection_state;
__u8 data[0];
-} __attribute__((packed));
+} __packed;
#define NCP_VOLNAME_LEN (16)
#define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@ struct nw_info_struct {
#ifdef __KERNEL__
struct nw_nfs_info nfs;
#endif
-} __attribute__((packed));
+} __packed;
/* modify mask - use with MODIFY_DOS_INFO structure */
#define DM_ATTRIBUTES (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@ struct nw_modify_dos_info {
__u16 inheritanceGrantMask;
__u16 inheritanceRevokeMask;
__u32 maximumSpace;
-} __attribute__((packed));
+} __packed;
struct nw_search_sequence {
__u8 volNumber;
__u32 dirBase;
__u32 sequence;
-} __attribute__((packed));
+} __packed;
#endif /* _LINUX_NCP_H */
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index 5ec9ca671687..8da05bc098ca 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -104,13 +104,13 @@ struct ncp_server {
unsigned int state; /* STREAM only: receiver state */
struct {
- __u32 magic __attribute__((packed));
- __u32 len __attribute__((packed));
- __u16 type __attribute__((packed));
- __u16 p1 __attribute__((packed));
- __u16 p2 __attribute__((packed));
- __u16 p3 __attribute__((packed));
- __u16 type2 __attribute__((packed));
+ __u32 magic __packed;
+ __u32 len __packed;
+ __u16 type __packed;
+ __u16 p1 __packed;
+ __u16 p2 __packed;
+ __u16 p3 __packed;
+ __u16 type2 __packed;
} buf; /* STREAM only: temporary buffer */
unsigned char* ptr; /* STREAM only: pointer to data */
size_t len; /* STREAM only: length of data to receive */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40291f375024..8fa5e5aa879a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -159,45 +159,49 @@ static inline bool dev_xmit_complete(int rc)
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
-#endif /* __KERNEL__ */
-
/*
- * Network device statistics. Akin to the 2.0 ether stats but
- * with byte counters.
+ * Old network device statistics. Fields are native words
+ * (unsigned long) so they can be read and written atomically.
+ * Each field is padded to 64 bits for compatibility with
+ * rtnl_link_stats64.
*/
+#if BITS_PER_LONG == 64
+#define NET_DEVICE_STATS_DEFINE(name) unsigned long name
+#elif defined(__LITTLE_ENDIAN)
+#define NET_DEVICE_STATS_DEFINE(name) unsigned long name, pad_ ## name
+#else
+#define NET_DEVICE_STATS_DEFINE(name) unsigned long pad_ ## name, name
+#endif
+
struct net_device_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* no space in linux buffers */
- unsigned long tx_dropped; /* no space available in linux */
- unsigned long multicast; /* multicast packets received */
- unsigned long collisions;
-
- /* detailed rx_errors: */
- unsigned long rx_length_errors;
- unsigned long rx_over_errors; /* receiver ring buff overflow */
- unsigned long rx_crc_errors; /* recved pkt with crc error */
- unsigned long rx_frame_errors; /* recv'd frame alignment error */
- unsigned long rx_fifo_errors; /* recv'r fifo overrun */
- unsigned long rx_missed_errors; /* receiver missed packet */
-
- /* detailed tx_errors */
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
-
- /* for cslip etc */
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ NET_DEVICE_STATS_DEFINE(rx_packets);
+ NET_DEVICE_STATS_DEFINE(tx_packets);
+ NET_DEVICE_STATS_DEFINE(rx_bytes);
+ NET_DEVICE_STATS_DEFINE(tx_bytes);
+ NET_DEVICE_STATS_DEFINE(rx_errors);
+ NET_DEVICE_STATS_DEFINE(tx_errors);
+ NET_DEVICE_STATS_DEFINE(rx_dropped);
+ NET_DEVICE_STATS_DEFINE(tx_dropped);
+ NET_DEVICE_STATS_DEFINE(multicast);
+ NET_DEVICE_STATS_DEFINE(collisions);
+ NET_DEVICE_STATS_DEFINE(rx_length_errors);
+ NET_DEVICE_STATS_DEFINE(rx_over_errors);
+ NET_DEVICE_STATS_DEFINE(rx_crc_errors);
+ NET_DEVICE_STATS_DEFINE(rx_frame_errors);
+ NET_DEVICE_STATS_DEFINE(rx_fifo_errors);
+ NET_DEVICE_STATS_DEFINE(rx_missed_errors);
+ NET_DEVICE_STATS_DEFINE(tx_aborted_errors);
+ NET_DEVICE_STATS_DEFINE(tx_carrier_errors);
+ NET_DEVICE_STATS_DEFINE(tx_fifo_errors);
+ NET_DEVICE_STATS_DEFINE(tx_heartbeat_errors);
+ NET_DEVICE_STATS_DEFINE(tx_window_errors);
+ NET_DEVICE_STATS_DEFINE(rx_compressed);
+ NET_DEVICE_STATS_DEFINE(tx_compressed);
};
+#endif /* __KERNEL__ */
+
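The padding macro above is only meaningful if the padded legacy structure occupies exactly the same space as rtnl_link_stats64, which is what allows the union added to struct net_device further below. A hypothetical compile-time check of that assumption (not part of the patch) could look like:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>

/* Illustrative only: the padded legacy structure is expected to be the
 * same size as the 64bit statistics structure. Note that on 32bit
 * big-endian the value still lives in the second word of each slot,
 * so field offsets are not identical there. */
static inline void net_device_stats_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct net_device_stats) !=
		     sizeof(struct rtnl_link_stats64));
}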
/* Media selection options. */
enum {
@@ -381,6 +385,8 @@ enum gro_result {
};
typedef enum gro_result gro_result_t;
+typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
+
extern void __napi_schedule(struct napi_struct *n);
static inline int napi_disable_pending(struct napi_struct *n)
@@ -660,10 +666,19 @@ struct netdev_rx_queue {
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
+ * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
- * statistics. If not defined, the counters in dev->stats will
- * be used.
+ * statistics. Drivers must do one of the following:
+ * 1. Define @ndo_get_stats64 to update an rtnl_link_stats64 structure
+ * (which should normally be dev->stats64) and return a pointer to
+ * it. The structure must not be changed asynchronously (a sketch
+ * of this option follows after this hunk).
+ * 2. Define @ndo_get_stats to update a net_device_stats structure
+ * (which should normally be dev->stats) and return a pointer to
+ * it. The structure may be changed asynchronously only if each
+ * field is written atomically.
+ * 3. Update dev->stats asynchronously and atomically, and define
+ * neither operation.
*
* void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
* If device supports VLAN receive acceleration
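A minimal sketch of option 1 from the list above, assuming a hypothetical driver; struct foo_priv and its counters are invented, only the callback signature and dev->stats64 come from this patch.

#include <linux/netdevice.h>

struct foo_priv {
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
};

static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct rtnl_link_stats64 *stats = &dev->stats64;

	/* Fill the 64bit counters from driver-private state; the
	 * structure must not change under the caller's feet. */
	stats->rx_packets = priv->rx_packets;
	stats->rx_bytes   = priv->rx_bytes;
	stats->tx_packets = priv->tx_packets;
	stats->tx_bytes   = priv->tx_bytes;

	return stats;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64	= foo_get_stats64,
	/* ... remaining callbacks ... */
};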
@@ -718,6 +733,7 @@ struct net_device_ops {
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev);
+ struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
void (*ndo_vlan_rx_register)(struct net_device *dev,
@@ -728,6 +744,8 @@ struct net_device_ops {
unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
+ int (*ndo_netpoll_setup)(struct net_device *dev,
+ struct netpoll_info *info);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -847,7 +865,8 @@ struct net_device {
#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
@@ -867,7 +886,10 @@ struct net_device {
int ifindex;
int iflink;
- struct net_device_stats stats;
+ union {
+ struct rtnl_link_stats64 stats64;
+ struct net_device_stats stats;
+ };
#ifdef CONFIG_WIRELESS_EXT
/* List of functions to handle Wireless Extensions (instead of ioctl).
@@ -957,6 +979,8 @@ struct net_device {
#endif
struct netdev_queue rx_queue;
+ rx_handler_func_t *rx_handler;
+ void *rx_handler_data;
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
@@ -1024,10 +1048,6 @@ struct net_device {
/* mid-layer private */
void *ml_priv;
- /* bridge stuff */
- struct net_bridge_port *br_port;
- /* macvlan */
- struct macvlan_port *macvlan_port;
/* GARP */
struct garp_port *garp_port;
@@ -1087,11 +1107,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
static inline
struct net *dev_net(const struct net_device *dev)
{
-#ifdef CONFIG_NET_NS
- return dev->nd_net;
-#else
- return &init_net;
-#endif
+ return read_pnet(&dev->nd_net);
}
static inline
@@ -1272,8 +1288,8 @@ extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);
-extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
- unsigned short mask);
+extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+ unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -1693,6 +1709,11 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
+extern int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
+extern void netdev_rx_handler_unregister(struct net_device *dev);
+
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
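A rough sketch of how a subsystem might use the new rx_handler hooks in place of the removed bridge/macvlan pointers; my_port, my_handle_frame and the attach/detach helpers are hypothetical, and registration is assumed to happen under rtnl as the in-tree conversions do.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

struct my_port {
	struct net_device *dev;
};

/* Called for every frame received on the attached device. Returning
 * the skb hands it back to the normal receive path. */
static struct sk_buff *my_handle_frame(struct sk_buff *skb)
{
	return skb;
}

static int my_port_attach(struct net_device *dev, struct my_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_handle_frame, port);
	rtnl_unlock();
	return err;
}

static void my_port_detach(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
}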
@@ -1772,6 +1793,8 @@ extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
+extern void netif_notify_peers(struct net_device *dev);
+
/**
* netif_dormant_on - mark device as dormant.
* @dev: network device
@@ -2116,7 +2139,7 @@ extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
-extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
+extern const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev);
extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
extern int netdev_max_backlog;
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 48767cd16453..bb103f43afa0 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -8,6 +8,7 @@ header-y += xt_CONNMARK.h
header-y += xt_CONNSECMARK.h
header-y += xt_CT.h
header-y += xt_DSCP.h
+header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_MARK.h
header-y += xt_NFLOG.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 14e6d32002c4..1afd18c855ec 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -76,6 +76,10 @@ enum ip_conntrack_status {
/* Conntrack is a template */
IPS_TEMPLATE_BIT = 11,
IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
+
+ /* Conntrack is a fake untracked entry */
+ IPS_UNTRACKED_BIT = 12,
+ IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
};
/* Connection tracking event types */
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index d3bab7a2c9b7..1d0b84aa1d42 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -89,6 +89,7 @@ enum nfulnl_attr_config {
#define NFULNL_COPY_NONE 0x00
#define NFULNL_COPY_META 0x01
#define NFULNL_COPY_PACKET 0x02
+#define NFULNL_COPY_DISABLED 0x03
#define NFULNL_CFG_F_SEQ 0x0001
#define NFULNL_CFG_F_SEQ_GLOBAL 0x0002
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h
new file mode 100644
index 000000000000..3e1aa1be942e
--- /dev/null
+++ b/include/linux/netfilter/xt_IDLETIMER.h
@@ -0,0 +1,45 @@
+/*
+ * linux/include/linux/netfilter/xt_IDLETIMER.h
+ *
+ * Header file for Xtables timer target module.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and forward-ported to 2.6.34
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _XT_IDLETIMER_H
+#define _XT_IDLETIMER_H
+
+#include <linux/types.h>
+
+#define MAX_IDLETIMER_LABEL_SIZE 28
+
+struct idletimer_tg_info {
+ __u32 timeout;
+
+ char label[MAX_IDLETIMER_LABEL_SIZE];
+
+ /* for kernel module internal use only */
+ struct idletimer_tg *timer __attribute__((aligned(8)));
+};
+
+#endif
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e231215865..413742c92d14 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
@@ -57,12 +59,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline bool netpoll_rx(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo;
unsigned long flags;
bool ret = false;
+ rcu_read_lock_bh();
+ npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
- return false;
+ goto out;
spin_lock_irqsave(&npinfo->rx_lock, flags);
/* check rx_flags again with the lock held */
@@ -70,12 +75,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
ret = true;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+out:
+ rcu_read_unlock_bh();
return ret;
}
static inline int netpoll_rx_on(struct sk_buff *skb)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
@@ -91,7 +98,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- rcu_read_lock(); /* deal with race on ->npinfo */
if (dev && dev->npinfo) {
spin_lock(&napi->poll_lock);
napi->poll_owner = smp_processor_id();
@@ -108,7 +114,11 @@ static inline void netpoll_poll_unlock(void *have)
napi->poll_owner = -1;
spin_unlock(&napi->poll_lock);
}
- rcu_read_unlock();
+}
+
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return irqs_disabled();
}
#else
@@ -134,6 +144,10 @@ static inline void netpoll_poll_unlock(void *have)
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
+static inline int netpoll_tx_running(struct net_device *dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index b7c77f9712f4..64fb32b93a28 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -132,7 +132,7 @@
* %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and
* %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
* @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
- * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will
+ * to the specified ISO/IEC 3166-1 alpha2 country code. The core will
* store this as a valid request and then query userspace for it.
*
* @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 540703b555cb..b2f1a4d83550 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret)
#define NETDEV_POST_INIT 0x0010
#define NETDEV_UNREGISTER_BATCH 0x0011
#define NETDEV_BONDING_DESLAVE 0x0012
+#define NETDEV_NOTIFY_PEERS 0x0013
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index e5126cff9b2a..24426c3d6b5a 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -56,7 +56,7 @@ struct phonethdr {
__be16 pn_length;
__u8 pn_robj;
__u8 pn_sobj;
-} __attribute__((packed));
+} __packed;
/* Common Phonet payload header */
struct phonetmsg {
@@ -98,7 +98,7 @@ struct sockaddr_pn {
__u8 spn_dev;
__u8 spn_resource;
__u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __attribute__ ((packed));
+} __packed;
/* Well known address */
#define PN_DEV_PC 0x10
diff --git a/include/linux/rds.h b/include/linux/rds.h
index cab4994c2f63..24bce3ded9ea 100644
--- a/include/linux/rds.h
+++ b/include/linux/rds.h
@@ -100,7 +100,7 @@
struct rds_info_counter {
u_int8_t name[32];
u_int64_t value;
-} __attribute__((packed));
+} __packed;
#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01
#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02
@@ -115,7 +115,7 @@ struct rds_info_connection {
__be32 faddr;
u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */
u_int8_t flags;
-} __attribute__((packed));
+} __packed;
struct rds_info_flow {
__be32 laddr;
@@ -123,7 +123,7 @@ struct rds_info_flow {
u_int32_t bytes;
__be16 lport;
__be16 fport;
-} __attribute__((packed));
+} __packed;
#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
@@ -136,7 +136,7 @@ struct rds_info_message {
__be16 lport;
__be16 fport;
u_int8_t flags;
-} __attribute__((packed));
+} __packed;
struct rds_info_socket {
u_int32_t sndbuf;
@@ -146,7 +146,7 @@ struct rds_info_socket {
__be16 connected_port;
u_int32_t rcvbuf;
u_int64_t inum;
-} __attribute__((packed));
+} __packed;
struct rds_info_tcp_socket {
__be32 local_addr;
@@ -158,7 +158,7 @@ struct rds_info_tcp_socket {
u_int32_t last_sent_nxt;
u_int32_t last_expected_una;
u_int32_t last_seen_una;
-} __attribute__((packed));
+} __packed;
#define RDS_IB_GID_LEN 16
struct rds_info_rdma_connection {
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index c20d3ce673c0..c11a28706fa4 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -61,7 +61,7 @@ typedef struct sctphdr {
__be16 dest;
__be32 vtag;
__le32 checksum;
-} __attribute__((packed)) sctp_sctphdr_t;
+} __packed sctp_sctphdr_t;
#ifdef __KERNEL__
#include <linux/skbuff.h>
@@ -77,7 +77,7 @@ typedef struct sctp_chunkhdr {
__u8 type;
__u8 flags;
__be16 length;
-} __attribute__((packed)) sctp_chunkhdr_t;
+} __packed sctp_chunkhdr_t;
/* Section 3.2. Chunk Type Values.
@@ -167,7 +167,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 };
typedef struct sctp_paramhdr {
__be16 type;
__be16 length;
-} __attribute__((packed)) sctp_paramhdr_t;
+} __packed sctp_paramhdr_t;
typedef enum {
@@ -228,12 +228,12 @@ typedef struct sctp_datahdr {
__be16 ssn;
__be32 ppid;
__u8 payload[0];
-} __attribute__((packed)) sctp_datahdr_t;
+} __packed sctp_datahdr_t;
typedef struct sctp_data_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_datahdr_t data_hdr;
-} __attribute__((packed)) sctp_data_chunk_t;
+} __packed sctp_data_chunk_t;
/* DATA Chuck Specific Flags */
enum {
@@ -259,78 +259,78 @@ typedef struct sctp_inithdr {
__be16 num_inbound_streams;
__be32 initial_tsn;
__u8 params[0];
-} __attribute__((packed)) sctp_inithdr_t;
+} __packed sctp_inithdr_t;
typedef struct sctp_init_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_inithdr_t init_hdr;
-} __attribute__((packed)) sctp_init_chunk_t;
+} __packed sctp_init_chunk_t;
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
typedef struct sctp_ipv4addr_param {
sctp_paramhdr_t param_hdr;
struct in_addr addr;
-} __attribute__((packed)) sctp_ipv4addr_param_t;
+} __packed sctp_ipv4addr_param_t;
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
typedef struct sctp_ipv6addr_param {
sctp_paramhdr_t param_hdr;
struct in6_addr addr;
-} __attribute__((packed)) sctp_ipv6addr_param_t;
+} __packed sctp_ipv6addr_param_t;
/* Section 3.3.2.1 Cookie Preservative (9) */
typedef struct sctp_cookie_preserve_param {
sctp_paramhdr_t param_hdr;
__be32 lifespan_increment;
-} __attribute__((packed)) sctp_cookie_preserve_param_t;
+} __packed sctp_cookie_preserve_param_t;
/* Section 3.3.2.1 Host Name Address (11) */
typedef struct sctp_hostname_param {
sctp_paramhdr_t param_hdr;
uint8_t hostname[0];
-} __attribute__((packed)) sctp_hostname_param_t;
+} __packed sctp_hostname_param_t;
/* Section 3.3.2.1 Supported Address Types (12) */
typedef struct sctp_supported_addrs_param {
sctp_paramhdr_t param_hdr;
__be16 types[0];
-} __attribute__((packed)) sctp_supported_addrs_param_t;
+} __packed sctp_supported_addrs_param_t;
/* Appendix A. ECN Capable (32768) */
typedef struct sctp_ecn_capable_param {
sctp_paramhdr_t param_hdr;
-} __attribute__((packed)) sctp_ecn_capable_param_t;
+} __packed sctp_ecn_capable_param_t;
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
typedef struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} __attribute__((packed)) sctp_adaptation_ind_param_t;
+} __packed sctp_adaptation_ind_param_t;
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
typedef struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} __attribute__((packed)) sctp_supported_ext_param_t;
+} __packed sctp_supported_ext_param_t;
/* AUTH Section 3.1 Random */
typedef struct sctp_random_param {
sctp_paramhdr_t param_hdr;
__u8 random_val[0];
-} __attribute__((packed)) sctp_random_param_t;
+} __packed sctp_random_param_t;
/* AUTH Section 3.2 Chunk List */
typedef struct sctp_chunks_param {
sctp_paramhdr_t param_hdr;
__u8 chunks[0];
-} __attribute__((packed)) sctp_chunks_param_t;
+} __packed sctp_chunks_param_t;
/* AUTH Section 3.3 HMAC Algorithm */
typedef struct sctp_hmac_algo_param {
sctp_paramhdr_t param_hdr;
__be16 hmac_ids[0];
-} __attribute__((packed)) sctp_hmac_algo_param_t;
+} __packed sctp_hmac_algo_param_t;
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -342,13 +342,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t;
typedef struct sctp_cookie_param {
sctp_paramhdr_t p;
__u8 body[0];
-} __attribute__((packed)) sctp_cookie_param_t;
+} __packed sctp_cookie_param_t;
/* Section 3.3.3.1 Unrecognized Parameters (8) */
typedef struct sctp_unrecognized_param {
sctp_paramhdr_t param_hdr;
sctp_paramhdr_t unrecognized;
-} __attribute__((packed)) sctp_unrecognized_param_t;
+} __packed sctp_unrecognized_param_t;
@@ -363,7 +363,7 @@ typedef struct sctp_unrecognized_param {
typedef struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} __attribute__((packed)) sctp_gap_ack_block_t;
+} __packed sctp_gap_ack_block_t;
typedef __be32 sctp_dup_tsn_t;
@@ -378,12 +378,12 @@ typedef struct sctp_sackhdr {
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
sctp_sack_variable_t variable[0];
-} __attribute__((packed)) sctp_sackhdr_t;
+} __packed sctp_sackhdr_t;
typedef struct sctp_sack_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_sackhdr_t sack_hdr;
-} __attribute__((packed)) sctp_sack_chunk_t;
+} __packed sctp_sack_chunk_t;
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -395,12 +395,12 @@ typedef struct sctp_sack_chunk {
typedef struct sctp_heartbeathdr {
sctp_paramhdr_t info;
-} __attribute__((packed)) sctp_heartbeathdr_t;
+} __packed sctp_heartbeathdr_t;
typedef struct sctp_heartbeat_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_heartbeathdr_t hb_hdr;
-} __attribute__((packed)) sctp_heartbeat_chunk_t;
+} __packed sctp_heartbeat_chunk_t;
/* For the abort and shutdown ACK we must carry the init tag in the
@@ -409,7 +409,7 @@ typedef struct sctp_heartbeat_chunk {
*/
typedef struct sctp_abort_chunk {
sctp_chunkhdr_t uh;
-} __attribute__((packed)) sctp_abort_chunk_t;
+} __packed sctp_abort_chunk_t;
/* For the graceful shutdown we must carry the tag (in common header)
@@ -417,12 +417,12 @@ typedef struct sctp_abort_chunk {
*/
typedef struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} __attribute__((packed)) sctp_shutdownhdr_t;
+} __packed sctp_shutdownhdr_t;
struct sctp_shutdown_chunk_t {
sctp_chunkhdr_t chunk_hdr;
sctp_shutdownhdr_t shutdown_hdr;
-} __attribute__ ((packed));
+} __packed;
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
@@ -430,12 +430,12 @@ typedef struct sctp_errhdr {
__be16 cause;
__be16 length;
__u8 variable[0];
-} __attribute__((packed)) sctp_errhdr_t;
+} __packed sctp_errhdr_t;
typedef struct sctp_operr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_errhdr_t err_hdr;
-} __attribute__((packed)) sctp_operr_chunk_t;
+} __packed sctp_operr_chunk_t;
/* RFC 2960 3.3.10 - Operation Error
*
@@ -525,7 +525,7 @@ typedef struct sctp_ecnehdr {
typedef struct sctp_ecne_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_ecnehdr_t ence_hdr;
-} __attribute__((packed)) sctp_ecne_chunk_t;
+} __packed sctp_ecne_chunk_t;
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
@@ -537,7 +537,7 @@ typedef struct sctp_cwrhdr {
typedef struct sctp_cwr_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_cwrhdr_t cwr_hdr;
-} __attribute__((packed)) sctp_cwr_chunk_t;
+} __packed sctp_cwr_chunk_t;
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -588,17 +588,17 @@ typedef struct sctp_cwr_chunk {
struct sctp_fwdtsn_skip {
__be16 stream;
__be16 ssn;
-} __attribute__((packed));
+} __packed;
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
struct sctp_fwdtsn_skip skip[0];
-} __attribute((packed));
+} __packed;
struct sctp_fwdtsn_chunk {
struct sctp_chunkhdr chunk_hdr;
struct sctp_fwdtsn_hdr fwdtsn_hdr;
-} __attribute((packed));
+} __packed;
/* ADDIP
@@ -636,17 +636,17 @@ struct sctp_fwdtsn_chunk {
typedef struct sctp_addip_param {
sctp_paramhdr_t param_hdr;
__be32 crr_id;
-} __attribute__((packed)) sctp_addip_param_t;
+} __packed sctp_addip_param_t;
typedef struct sctp_addiphdr {
__be32 serial;
__u8 params[0];
-} __attribute__((packed)) sctp_addiphdr_t;
+} __packed sctp_addiphdr_t;
typedef struct sctp_addip_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_addiphdr_t addip_hdr;
-} __attribute__((packed)) sctp_addip_chunk_t;
+} __packed sctp_addip_chunk_t;
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -701,11 +701,11 @@ typedef struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
__u8 hmac[0];
-} __attribute__((packed)) sctp_authhdr_t;
+} __packed sctp_authhdr_t;
typedef struct sctp_auth_chunk {
sctp_chunkhdr_t chunk_hdr;
sctp_authhdr_t auth_hdr;
-} __attribute__((packed)) sctp_auth_chunk_t;
+} __packed sctp_auth_chunk_t;
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f89e7fd59a4c..ac74ee085d74 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus() for example only accesses one 64-byte aligned block:
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 64
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -2132,7 +2134,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
struct skb_shared_info *shinfo = skb_shinfo(skb);
- if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+ if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+ unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
return true;
}
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 52797714ade7..ebb0c80ffd6e 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -229,6 +229,7 @@ enum
LINUX_MIB_TCPBACKLOGDROP,
LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
LINUX_MIB_TCPDEFERACCEPTDROP,
+ LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
__LINUX_MIB_MAX
};
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 032a19eb61b1..a2fada9becb6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage {
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
+struct pid;
+struct cred;
+
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -309,6 +312,8 @@ struct ucred {
#define IPX_TYPE 1
#ifdef __KERNEL__
+extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index a2608bff9c78..623b704fdc42 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -167,7 +167,7 @@ struct ssb_device {
* is an optimization. */
const struct ssb_bus_ops *ops;
- struct device *dev;
+ struct device *dev, *dma_dev;
struct ssb_bus *bus;
struct ssb_device_id id;
@@ -470,14 +470,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev);
#define SSB_DMA_TRANSLATION_MASK 0xC0000000
#define SSB_DMA_TRANSLATION_SHIFT 30
-extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
-
-extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp_flags);
-extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- gfp_t gfp_flags);
-
static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
{
#ifdef CONFIG_SSB_DEBUG
@@ -486,155 +478,6 @@ static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
#endif /* DEBUG */
}
-static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- return pci_dma_mapping_error(dev->bus->host_pci, addr);
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- return dma_mapping_error(dev->dev, addr);
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
- return -ENOSYS;
-}
-
-static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
- size_t size, enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- return pci_map_single(dev->bus->host_pci, p, size, dir);
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- return dma_map_single(dev->dev, p, size, dir);
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
- return 0;
-}
-
-static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
- return;
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- dma_unmap_single(dev->dev, dma_addr, size, dir);
- return;
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
- dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
- size, dir);
- return;
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
- return;
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
- dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
- size, dir);
- return;
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
- return;
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
- dma_addr_t dma_addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- /* Just sync everything. That's all the PCI API can do. */
- pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
- offset + size, dir);
- return;
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
- size, dir);
- return;
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
-}
-
-static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
- dma_addr_t dma_addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- switch (dev->bus->bustype) {
- case SSB_BUSTYPE_PCI:
-#ifdef CONFIG_SSB_PCIHOST
- /* Just sync everything. That's all the PCI API can do. */
- pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
- offset + size, dir);
- return;
-#endif
- break;
- case SSB_BUSTYPE_SSB:
- dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
- size, dir);
- return;
- default:
- break;
- }
- __ssb_dma_not_implemented(dev);
-}
-
-
#ifdef CONFIG_SSB_PCIHOST
/* PCI-host wrapper driver */
extern int ssb_pcihost_register(struct pci_driver *driver);
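With the ssb_dma_* wrappers above gone, drivers are expected to call the generic DMA API against the new dma_dev member of struct ssb_device. A rough sketch of the intended replacement pattern; the function and buffer names are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

static dma_addr_t foo_map_rx_buffer(struct ssb_device *sdev,
				    void *buf, size_t len)
{
	dma_addr_t addr;

	/* Previously: ssb_dma_map_single(sdev, buf, len, DMA_FROM_DEVICE) */
	addr = dma_map_single(sdev->dma_dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(sdev->dma_dev, addr))
		return 0;

	return addr;
}

static void foo_unmap_rx_buffer(struct ssb_device *sdev,
				dma_addr_t addr, size_t len)
{
	dma_unmap_single(sdev->dma_dev, addr, len, DMA_FROM_DEVICE);
}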
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 000000000000..d0505156ed52
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64bit network statistics on 32bit and 64bit hosts,
+ * we provide a synchronization point that is a noop on 64bit or UP kernels.
+ *
+ * Key points:
+ * 1) Use a seqcount on 32bit SMP, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ * be lost, thus blocking readers forever.
+ * If this synchronization point is not a mutex, but a spinlock or
+ * spinlock_bh() or disable_bh() :
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If a reader fetches several counters, there is no guarantee the whole
+ * set of values is consistent (remember point 1: this is a noop on 64bit
+ * arches anyway).
+ *
+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+ * pure reads. But if they have to fetch many values, it is better to disable
+ * preemption/interrupts to avoid many retries.
+ *
+ * Usage :
+ *
+ * A stats producer (writer) should use the following template, provided it
+ * already has exclusive access to the counters (a lock is already taken, or
+ * per-cpu data is used in a non-preemptible context).
+ *
+ * spin_lock_bh(...) or other synchronization to get exclusive access
+ * ...
+ * u64_stats_update_begin(&stats->syncp);
+ * stats->bytes64 += len; // non atomic operation
+ * stats->packets64++; // non atomic operation
+ * u64_stats_update_end(&stats->syncp);
+ *
+ * A consumer (reader) should use the following template to get a consistent
+ * snapshot of each variable (but with no guarantee across several of them):
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ * start = u64_stats_fetch_begin(&stats->syncp);
+ * tbytes = stats->bytes64; // non atomic operation
+ * tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
+ *
+ *
+ * Example of use in drivers/net/loopback.c, using per_cpu containers,
+ * in BH disabled context; a standalone sketch also follows after this file.
+ */
+#include <linux/seqlock.h>
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+struct u64_stats_sync {
+ seqcount_t seq;
+};
+
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+ write_seqcount_begin(&syncp->seq);
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+ write_seqcount_end(&syncp->seq);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return read_seqcount_begin(&syncp->seq);
+}
+
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+ return read_seqcount_retry(&syncp->seq, start);
+}
+
+#else
+struct u64_stats_sync {
+};
+
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return 0;
+}
+
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
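
As a quick illustration (not part of the patch), here is a minimal sketch of how the two templates from the header comment combine for a per-cpu byte/packet pair; the structure and helper names (pcpu_lstats, lstats_add, lstats_read) are placeholders chosen for this example.

struct pcpu_lstats {
	u64			packets64;
	u64			bytes64;
	struct u64_stats_sync	syncp;
};

/* writer side: per-cpu data with BH disabled, so updates are already exclusive */
static void lstats_add(struct pcpu_lstats *lb_stats, unsigned int len)
{
	u64_stats_update_begin(&lb_stats->syncp);
	lb_stats->bytes64 += len;	/* non atomic operation */
	lb_stats->packets64++;		/* non atomic operation */
	u64_stats_update_end(&lb_stats->syncp);
}

/* reader side: may run on another cpu, retries if a writer was in progress */
static void lstats_read(const struct pcpu_lstats *lb_stats,
			u64 *tbytes, u64 *tpackets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&lb_stats->syncp);
		*tbytes = lb_stats->bytes64;
		*tpackets = lb_stats->packets64;
	} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
}
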
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index cc4f45361dbb..8178156711f9 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns)
kref_put(&ns->kref, free_user_ns);
}
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
+
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns)
{
}
+static inline uid_t user_ns_map_uid(struct user_namespace *to,
+ const struct cred *cred, uid_t uid)
+{
+ return uid;
+}
+static inline gid_t user_ns_map_gid(struct user_namespace *to,
+ const struct cred *cred, gid_t gid)
+{
+ return gid;
+}
+
#endif
#endif /* _LINUX_USER_H */
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
index ac95ce6606ac..c76fe2392506 100644
--- a/include/linux/wlp.h
+++ b/include/linux/wlp.h
@@ -300,7 +300,7 @@ struct wlp_ie {
__le16 cycle_param;
__le16 acw_anchor_addr;
u8 wssid_hash_list[];
-} __attribute__((packed));
+} __packed;
static inline int wlp_ie_hash_length(struct wlp_ie *ie)
{
@@ -324,7 +324,7 @@ static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length)
*/
struct wlp_nonce {
u8 data[16];
-} __attribute__((packed));
+} __packed;
/**
* WLP UUID
@@ -336,7 +336,7 @@ struct wlp_nonce {
*/
struct wlp_uuid {
u8 data[16];
-} __attribute__((packed));
+} __packed;
/**
@@ -348,7 +348,7 @@ struct wlp_dev_type {
u8 OUI[3];
u8 OUIsubdiv;
__le16 subID;
-} __attribute__((packed));
+} __packed;
/**
* WLP frame header
@@ -357,7 +357,7 @@ struct wlp_dev_type {
struct wlp_frame_hdr {
__le16 mux_hdr; /* WLP_PROTOCOL_ID */
enum wlp_frame_type type:8;
-} __attribute__((packed));
+} __packed;
/**
* WLP attribute field header
@@ -368,7 +368,7 @@ struct wlp_frame_hdr {
struct wlp_attr_hdr {
__le16 type;
__le16 length;
-} __attribute__((packed));
+} __packed;
/**
* Device information commonly used together
@@ -401,13 +401,13 @@ struct wlp_device_info {
struct wlp_attr_##name { \
struct wlp_attr_hdr hdr; \
type name; \
-} __attribute__((packed));
+} __packed;
#define wlp_attr_array(type, name) \
struct wlp_attr_##name { \
struct wlp_attr_hdr hdr; \
type name[]; \
-} __attribute__((packed));
+} __packed;
/**
* WLP association attribute fields
@@ -483,7 +483,7 @@ struct wlp_wss_info {
struct wlp_attr_accept_enrl accept;
struct wlp_attr_wss_sec_status sec_stat;
struct wlp_attr_wss_bcast bcast;
-} __attribute__((packed));
+} __packed;
/* WLP WSS Information */
wlp_attr_array(struct wlp_wss_info, wss_info)
@@ -520,7 +520,7 @@ wlp_attr(u8, wlp_assc_err)
struct wlp_frame_std_abbrv_hdr {
struct wlp_frame_hdr hdr;
u8 tag;
-} __attribute__((packed));
+} __packed;
/**
* WLP association frames
@@ -533,7 +533,7 @@ struct wlp_frame_assoc {
struct wlp_attr_version version;
struct wlp_attr_msg_type msg_type;
u8 attr[];
-} __attribute__((packed));
+} __packed;
/* Ethernet to dev address mapping */
struct wlp_eda {
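
The conversions in this header (and in several headers further down) only replace the open-coded attribute with the kernel's __packed shorthand; the generated layout is unchanged. A small sketch with a made-up wire header, assuming the usual definition of __packed in the compiler headers:

/* old spelling */
struct example_wire_hdr_old {
	u8	type;
	__le16	len;
} __attribute__((packed));

/* new spelling, identical layout: sizeof() == 3, no padding inserted */
struct example_wire_hdr {
	u8	type;
	__le16	len;
} __packed;
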
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c05fd717c588..bab385f13ac3 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -20,6 +20,7 @@ struct tcf_common {
struct gnet_stats_queue tcfc_qstats;
struct gnet_stats_rate_est tcfc_rate_est;
spinlock_t tcfc_lock;
+ struct rcu_head tcfc_rcu;
};
#define tcf_next common.tcfc_next
#define tcf_index common.tcfc_index
@@ -32,6 +33,7 @@ struct tcf_common {
#define tcf_qstats common.tcfc_qstats
#define tcf_rate_est common.tcfc_rate_est
#define tcf_lock common.tcfc_lock
+#define tcf_rcu common.tcfc_rcu
struct tcf_police {
struct tcf_common common;
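
The new tcfc_rcu head lets a tcf_common be reclaimed after an RCU grace period instead of synchronously. A minimal sketch of the usual pattern, with hypothetical helper names (the real call sites live in net/sched and are not part of this hunk):

static void tcf_common_free_rcu(struct rcu_head *head)
{
	/* recover the enclosing object from its embedded rcu_head */
	kfree(container_of(head, struct tcf_common, tcfc_rcu));
}

static void tcf_common_release(struct tcf_common *p)
{
	/* readers still walking the action hash under rcu_read_lock()
	 * stay safe: memory is only freed after a grace period
	 */
	call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
}
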
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 20725e213aee..90c9e2872f27 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -23,7 +23,8 @@ struct unix_address {
};
struct unix_skb_parms {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
@@ -31,7 +32,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 318ab9478a44..6da573c75d54 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -50,6 +50,9 @@ struct caif_connect_request {
* @client_layer: User implementation of client layer. This layer
* MUST have receive and control callback functions
* implemented.
+ * @ifindex: Link layer interface index used for this connection.
+ * @headroom: Head room needed by CAIF protocol.
+ * @tailroom: Tail room needed by CAIF protocol.
*
* This function connects a CAIF channel. The Client must implement
* the struct cflayer. This layer represents the Client layer and holds
@@ -59,8 +62,9 @@ struct caif_connect_request {
* E.g. CAIF Socket will call this function for each socket it connects
* and have one client_layer instance for each socket.
*/
-int caif_connect_client(struct caif_connect_request *config,
- struct cflayer *client_layer);
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom);
/**
* caif_disconnect_client - Disconnects a client from the CAIF stack.
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 25c472f0e5b8..c8b07a904e78 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -15,14 +15,8 @@ struct cfpktq;
struct caif_payload_info;
struct caif_packet_funcs;
-#define CAIF_MAX_FRAMESIZE 4096
-#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
-#define CAIF_NEEDED_HEADROOM (10)
-#define CAIF_NEEDED_TAILROOM (2)
#define CAIF_LAYER_NAME_SZ 16
-#define CAIF_SUCCESS 1
-#define CAIF_FAILURE 0
/**
* caif_assert() - Assert function for CAIF.
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index 9fc2fc20b884..bd646faffa47 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -7,6 +7,7 @@
#ifndef CFCNFG_H_
#define CFCNFG_H_
#include <linux/spinlock.h>
+#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfctrl.h>
@@ -73,8 +74,8 @@ void cfcnfg_remove(struct cfcnfg *cfg);
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
- void *dev, struct cflayer *phy_layer, u16 *phyid,
- enum cfcnfg_phy_preference pref,
+ struct net_device *dev, struct cflayer *phy_layer,
+ u16 *phyid, enum cfcnfg_phy_preference pref,
bool fcs, bool stx);
/**
@@ -114,11 +115,18 @@ void cfcnfg_release_adap_layer(struct cflayer *adap_layer);
* @param: Link setup parameters.
* @adap_layer: Specify the adaptation layer; the receive and
* flow-control functions MUST be set in the structure.
- *
+ * @ifindex: Link layer interface index used for this connection.
+ * @proto_head: Protocol head-space needed by CAIF protocol,
+ * excluding link layer.
+ * @proto_tail: Protocol tail-space needed by CAIF protocol,
+ * excluding link layer.
*/
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
struct cfctrl_link_param *param,
- struct cflayer *adap_layer);
+ struct cflayer *adap_layer,
+ int *ifindex,
+ int *proto_head,
+ int *proto_tail);
/**
* cfcnfg_get_phyid() - Get physical ID, given type.
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 2dc9eb193ecf..b1fa87ee0992 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -16,6 +16,8 @@ struct cfsrvl {
bool open;
bool phy_flow_on;
bool modem_flow_on;
+ bool supports_flowctrl;
+ void (*release)(struct kref *);
struct dev_info dev_info;
struct kref ref;
};
@@ -25,13 +27,15 @@ struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
-struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
+struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
+ int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
void cfservl_destroy(struct cflayer *layer);
void cfsrvl_init(struct cfsrvl *service,
- u8 channel_id,
- struct dev_info *dev_info);
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
u8 cfsrvl_getphyid(struct cflayer *layer);
@@ -50,7 +54,10 @@ static inline void cfsrvl_put(struct cflayer *layr)
if (layr == NULL)
return;
s = container_of(layr, struct cfsrvl, layer);
- kref_put(&s->ref, cfsrvl_release);
+
+ WARN_ON(!s->release);
+ if (s->release)
+ kref_put(&s->ref, s->release);
}
#endif /* CFSRVL_H_ */
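
With the release function now stored per service layer, cfsrvl_put() dispatches to whatever destructor the creating layer registered. A minimal sketch of how a service type might wire this up; cfmyl_release/cfmyl_create are hypothetical names, not part of the CAIF API:

static void cfmyl_release(struct kref *kref)
{
	struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);

	kfree(srvl);
}

struct cflayer *cfmyl_create(u8 channel_id, struct dev_info *dev_info)
{
	struct cfsrvl *srvl = kzalloc(sizeof(*srvl), GFP_ATOMIC);

	if (!srvl)
		return NULL;
	/* last argument: this service supports flow control */
	cfsrvl_init(srvl, channel_id, dev_info, true);
	srvl->release = cfmyl_release;	/* used later by cfsrvl_put() */
	return &srvl->layer;
}
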
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b44a2e5321a3..9c45b905aefc 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -37,6 +37,7 @@
*
* @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
* @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
+ * @IEEE80211_NUM_BANDS: number of defined bands
*/
enum ieee80211_band {
IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
@@ -89,7 +90,7 @@ enum ieee80211_channel_flags {
* @max_power: maximum transmission power (in dBm)
* @beacon_found: helper to regulatory code to indicate when a beacon
* has been found on this channel. Use regulatory_hint_found_beacon()
- * to enable this, this is is useful only on 5 GHz band.
+ * to enable this, this is useful only on 5 GHz band.
* @orig_mag: internal use
* @orig_mpwr: internal use
*/
@@ -188,6 +189,7 @@ struct ieee80211_sta_ht_cap {
* in this band. Must be sorted to give a valid "supported
* rates" IE, i.e. CCK rates first, then OFDM.
* @n_bitrates: Number of bitrates in @bitrates
+ * @ht_cap: HT capabilities in this band
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
@@ -225,6 +227,7 @@ struct vif_params {
* @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
+ * @seq_len: length of @seq.
*/
struct key_params {
u8 *key;
@@ -237,6 +240,8 @@ struct key_params {
/**
* enum survey_info_flags - survey information flags
*
+ * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
+ *
* Used by the driver to indicate which info in &struct survey_info
* it has filled in during the get_survey().
*/
@@ -247,13 +252,13 @@ enum survey_info_flags {
/**
* struct survey_info - channel survey response
*
- * Used by dump_survey() to report back per-channel survey information.
- *
* @channel: the channel this survey record reports, mandatory
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
* optional
*
+ * Used by dump_survey() to report back per-channel survey information.
+ *
* This structure can later be expanded with things like
* channel duty cycle etc.
*/
@@ -288,7 +293,7 @@ struct beacon_parameters {
*
* @PLINK_ACTION_INVALID: action 0 is reserved
* @PLINK_ACTION_OPEN: start mesh peer link establishment
- * @PLINK_ACTION_BLOCL: block traffic from this mesh peer
+ * @PLINK_ACTION_BLOCK: block traffic from this mesh peer
*/
enum plink_actions {
PLINK_ACTION_INVALID,
@@ -311,6 +316,8 @@ enum plink_actions {
* (bitmask of BIT(NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
+ * @plink_action: plink action to take
+ * @ht_capa: HT capabilities of station
*/
struct station_parameters {
u8 *supported_rates;
@@ -448,13 +455,13 @@ enum monitor_flags {
* Used by the driver to indicate which info in &struct mpath_info it has filled
* in during get_station() or dump_station().
*
- * MPATH_INFO_FRAME_QLEN: @frame_qlen filled
- * MPATH_INFO_SN: @sn filled
- * MPATH_INFO_METRIC: @metric filled
- * MPATH_INFO_EXPTIME: @exptime filled
- * MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
- * MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
- * MPATH_INFO_FLAGS: @flags filled
+ * @MPATH_INFO_FRAME_QLEN: @frame_qlen filled
+ * @MPATH_INFO_SN: @sn filled
+ * @MPATH_INFO_METRIC: @metric filled
+ * @MPATH_INFO_EXPTIME: @exptime filled
+ * @MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled
+ * @MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled
+ * @MPATH_INFO_FLAGS: @flags filled
*/
enum mpath_info_flags {
MPATH_INFO_FRAME_QLEN = BIT(0),
@@ -587,6 +594,7 @@ struct cfg80211_ssid {
* @ie_len: length of ie in octets
* @wiphy: the wiphy this was for
* @dev: the interface
+ * @aborted: (internal) scan request was notified as aborted
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -623,6 +631,7 @@ enum cfg80211_signal_type {
* This structure describes a BSS (which may also be a mesh network)
* for use in scan results and similar.
*
+ * @channel: channel this BSS is on
* @bssid: BSSID of the BSS
* @tsf: timestamp of last received update
* @beacon_interval: the beacon interval as from the frame
@@ -801,6 +810,7 @@ struct cfg80211_disassoc_request {
* @beacon_interval: beacon interval to use
* @privacy: this is a protected network, keys will be configured
* after joining
+ * @basic_rates: bitmap of basic rates to use when creating the IBSS
*/
struct cfg80211_ibss_params {
u8 *ssid;
@@ -809,6 +819,7 @@ struct cfg80211_ibss_params {
u8 *ie;
u8 ssid_len, ie_len;
u16 beacon_interval;
+ u32 basic_rates;
bool channel_fixed;
bool privacy;
};
@@ -826,8 +837,8 @@ struct cfg80211_ibss_params {
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
- * @assoc_ie: IEs for association request
- * @assoc_ie_len: Length of assoc_ie in octets
+ * @ie: IEs for association request
+ * @ie_len: Length of ie in octets
* @privacy: indicates whether privacy-enabled APs should be used
* @crypto: crypto settings
* @key_len: length of WEP key for shared key authentication
@@ -850,10 +861,11 @@ struct cfg80211_connect_params {
/**
* enum wiphy_params_flags - set_wiphy_params bitfield values
- * WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
- * WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
- * WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
- * WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
+ * @WIPHY_PARAM_RETRY_SHORT: wiphy->retry_short has changed
+ * @WIPHY_PARAM_RETRY_LONG: wiphy->retry_long has changed
+ * @WIPHY_PARAM_FRAG_THRESHOLD: wiphy->frag_threshold has changed
+ * @WIPHY_PARAM_RTS_THRESHOLD: wiphy->rts_threshold has changed
+ * @WIPHY_PARAM_COVERAGE_CLASS: coverage class changed
*/
enum wiphy_params_flags {
WIPHY_PARAM_RETRY_SHORT = 1 << 0,
@@ -949,10 +961,16 @@ struct cfg80211_pmksa {
* @del_beacon: Remove beacon configuration and stop sending the beacon.
*
* @add_station: Add a new station.
- *
* @del_station: Remove a station; @mac may be NULL to remove all stations.
- *
* @change_station: Modify a given station.
+ * @get_station: get station information for the station identified by @mac
+ * @dump_station: dump station callback -- resume dump at index @idx
+ *
+ * @add_mpath: add a fixed mesh path
+ * @del_mpath: delete a given mesh path
+ * @change_mpath: change a given mesh path
+ * @get_mpath: get a mesh path for the given parameters
+ * @dump_mpath: dump mesh path callback -- resume dump at index @idx
*
* @get_mesh_params: Put the current mesh parameters into *params
*
@@ -960,8 +978,6 @@ struct cfg80211_pmksa {
* The mask is a bitfield which tells us which parameters to
* set, and which to leave alone.
*
- * @set_mesh_cfg: set mesh parameters (by now, just mesh id)
- *
* @change_bss: Modify parameters for a given BSS.
*
* @set_txq_params: Set TX queue parameters
@@ -1002,6 +1018,8 @@ struct cfg80211_pmksa {
* @get_tx_power: store the current TX power into the dbm variable;
* return 0 if successful
*
+ * @set_wds_peer: set the WDS peer for a WDS interface
+ *
* @rfkill_poll: polls the hw rfkill line, use cfg80211 reporting
* functions to adjust rfkill hw state
*
@@ -1019,6 +1037,8 @@ struct cfg80211_pmksa {
*
* @testmode_cmd: run a test mode command
*
+ * @set_bitrate_mask: set the bitrate mask configuration
+ *
* @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
* devices running firmwares capable of generating the (re) association
* RSN IE. It allows for faster roaming between WPA2 BSSIDs.
@@ -1168,6 +1188,7 @@ struct cfg80211_ops {
int (*action)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
const u8 *buf, size_t len, u64 *cookie);
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
@@ -1230,8 +1251,6 @@ struct mac_address {
/**
* struct wiphy - wireless hardware description
- * @idx: the wiphy index assigned to this item
- * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
* @reg_notifier: the driver's regulatory notification callback
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
@@ -1245,7 +1264,7 @@ struct mac_address {
* @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
* -1 = fragmentation disabled, only odd values >= 256 used
* @rts_threshold: RTS threshold (dot11RTSThreshold); -1 = RTS/CTS disabled
- * @net: the network namespace this wiphy currently lives in
+ * @_net: the network namespace this wiphy currently lives in
* @perm_addr: permanent MAC address of this device
* @addr_mask: If the device supports multiple MAC addresses by masking,
* set this to a mask with variable bits set to 1, e.g. if the last
@@ -1258,6 +1277,28 @@ struct mac_address {
* by default for perm_addr. In this case, the mask should be set to
* all-zeroes. In this case it is assumed that the device can handle
* the same number of arbitrary MAC addresses.
+ * @debugfsdir: debugfs directory used for this wiphy, will be renamed
+ * automatically on wiphy renames
+ * @dev: (virtual) struct device for this wiphy
+ * @wext: wireless extension handlers
+ * @priv: driver private data (sized according to wiphy_new() parameter)
+ * @interface_modes: bitmask of interfaces types valid for this wiphy,
+ * must be set by driver
+ * @flags: wiphy flags, see &enum wiphy_flags
+ * @bss_priv_size: each BSS struct has private data allocated with it,
+ * this variable determines its size
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in
+ * any given scan
+ * @max_scan_ie_len: maximum length of user-controlled IEs device can
+ * add to probe request frames transmitted during a scan, must not
+ * include fixed IEs like supported rates
+ * @coverage_class: current coverage class
+ * @fw_version: firmware version for ethtool reporting
+ * @hw_version: hardware version for ethtool reporting
+ * @max_num_pmkids: maximum number of PMKIDs supported by device
+ * @privid: a pointer that drivers can use to identify if an arbitrary
+ * wiphy is theirs, e.g. in global notifiers
+ * @bands: information about bands/channels supported by this device
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -1330,27 +1371,16 @@ struct wiphy {
char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
};
-#ifdef CONFIG_NET_NS
static inline struct net *wiphy_net(struct wiphy *wiphy)
{
- return wiphy->_net;
+ return read_pnet(&wiphy->_net);
}
static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
{
- wiphy->_net = net;
-}
-#else
-static inline struct net *wiphy_net(struct wiphy *wiphy)
-{
- return &init_net;
+ write_pnet(&wiphy->_net, net);
}
-static inline void wiphy_net_set(struct wiphy *wiphy, struct net *net)
-{
-}
-#endif
-
/**
* wiphy_priv - return priv from wiphy
*
@@ -1471,13 +1501,14 @@ struct cfg80211_cached_keys;
* @ssid: (private) Used by the internal configuration code
* @ssid_len: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
- * @wext_bssid: (private) Used by the internal wireless extensions compat code
* @use_4addr: indicates 4addr mode is used on this interface, must be
* set by driver (if supported) on add_interface BEFORE registering the
* netdev and may otherwise be used by driver read-only, will be update
* by cfg80211 on change_interface
* @action_registrations: list of registrations for action frames
* @action_registrations_lock: lock for the list
+ * @mtx: mutex used to lock data in this struct
+ * @cleanup_work: work struct used for cleanup that can't be done directly
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -1551,11 +1582,13 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
/**
* ieee80211_channel_to_frequency - convert channel number to frequency
+ * @chan: channel number
*/
extern int ieee80211_channel_to_frequency(int chan);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
+ * @freq: center frequency
*/
extern int ieee80211_frequency_to_channel(int freq);
@@ -1570,6 +1603,8 @@ extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
int freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
+ * @wiphy: the struct wiphy to get the channel for
+ * @freq: the center frequency of the channel
*/
static inline struct ieee80211_channel *
ieee80211_get_channel(struct wiphy *wiphy, int freq)
@@ -1630,9 +1665,6 @@ struct ieee80211_radiotap_vendor_namespaces {
* @is_radiotap_ns: indicates whether the current namespace is the default
* radiotap namespace or not
*
- * @overrides: override standard radiotap fields
- * @n_overrides: number of overrides
- *
* @_rtheader: pointer to the radiotap header we are walking through
* @_max_length: length of radiotap header in cpu byte ordering
* @_arg_index: next argument index
@@ -1948,10 +1980,12 @@ int cfg80211_wext_giwap(struct net_device *dev,
void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted);
/**
- * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
*
* @wiphy: the wiphy reporting the BSS
- * @bss: the found BSS
+ * @channel: The channel the frame was received on
+ * @mgmt: the management frame (probe response or beacon)
+ * @len: length of the management frame
* @signal: the signal strength, type depends on the wiphy's signal_type
* @gfp: context flags
*
@@ -1964,6 +1998,23 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal, gfp_t gfp);
+/**
+ * cfg80211_inform_bss - inform cfg80211 of a new BSS
+ *
+ * @wiphy: the wiphy reporting the BSS
+ * @channel: The channel the frame was received on
+ * @bssid: the BSSID of the BSS
+ * @timestamp: the TSF timestamp sent by the peer
+ * @capability: the capability field sent by the peer
+ * @beacon_interval: the beacon interval announced by the peer
+ * @ie: additional IEs sent by the peer
+ * @ielen: length of the additional IEs
+ * @signal: the signal strength, type depends on the wiphy's signal_type
+ * @gfp: context flags
+ *
+ * This informs cfg80211 that BSS information was found and
+ * the BSS should be updated/added.
+ */
struct cfg80211_bss*
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 511a459ec10f..0916bbf3bdff 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -101,7 +101,7 @@ struct dn_short_packet {
__le16 dstnode;
__le16 srcnode;
__u8 forward;
-} __attribute__((packed));
+} __packed;
struct dn_long_packet {
__u8 msgflg;
@@ -115,7 +115,7 @@ struct dn_long_packet {
__u8 visit_ct;
__u8 s_class;
__u8 pt;
-} __attribute__((packed));
+} __packed;
/*------------------------- DRP - Routing messages ---------------------*/
@@ -132,7 +132,7 @@ struct endnode_hello_message {
__u8 mpd;
__u8 datalen;
__u8 data[2];
-} __attribute__((packed));
+} __packed;
struct rtnode_hello_message {
__u8 msgflg;
@@ -144,7 +144,7 @@ struct rtnode_hello_message {
__u8 area;
__le16 timer;
__u8 mpd;
-} __attribute__((packed));
+} __packed;
extern void dn_dev_init(void);
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 17d43d2db5ec..e43a2893f132 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -74,18 +74,18 @@ struct nsp_data_seg_msg {
__u8 msgflg;
__le16 dstaddr;
__le16 srcaddr;
-} __attribute__((packed));
+} __packed;
struct nsp_data_opt_msg {
__le16 acknum;
__le16 segnum;
__le16 lsflgs;
-} __attribute__((packed));
+} __packed;
struct nsp_data_opt_msg1 {
__le16 acknum;
__le16 segnum;
-} __attribute__((packed));
+} __packed;
/* Acknowledgment Message (data/other data) */
@@ -94,13 +94,13 @@ struct nsp_data_ack_msg {
__le16 dstaddr;
__le16 srcaddr;
__le16 acknum;
-} __attribute__((packed));
+} __packed;
/* Connect Acknowledgment Message */
struct nsp_conn_ack_msg {
__u8 msgflg;
__le16 dstaddr;
-} __attribute__((packed));
+} __packed;
/* Connect Initiate/Retransmit Initiate/Connect Confirm */
@@ -117,7 +117,7 @@ struct nsp_conn_init_msg {
#define NSP_FC_MASK 0x0c /* FC type mask */
__u8 info;
__le16 segsize;
-} __attribute__((packed));
+} __packed;
/* Disconnect Initiate/Disconnect Confirm */
struct nsp_disconn_init_msg {
@@ -125,7 +125,7 @@ struct nsp_disconn_init_msg {
__le16 dstaddr;
__le16 srcaddr;
__le16 reason;
-} __attribute__((packed));
+} __packed;
@@ -135,7 +135,7 @@ struct srcobj_fmt {
__le16 grpcode;
__le16 usrcode;
__u8 dlen;
-} __attribute__((packed));
+} __packed;
/*
* A collection of functions for manipulating the sequence
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 60c9f22d8694..ccadab3aa3f6 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -65,9 +65,7 @@ extern void dn_rt_cache_flush(int delay);
* packets to the originating host.
*/
struct dn_route {
- union {
- struct dst_entry dst;
- } u;
+ struct dst_entry dst;
struct flowi fl;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index eb551baafc04..f7dcd2c70412 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -68,26 +68,15 @@ struct genl_info {
#endif
};
-#ifdef CONFIG_NET_NS
static inline struct net *genl_info_net(struct genl_info *info)
{
- return info->_net;
+ return read_pnet(&info->_net);
}
static inline void genl_info_net_set(struct genl_info *info, struct net *net)
{
- info->_net = net;
+ write_pnet(&info->_net, net);
}
-#else
-static inline struct net *genl_info_net(struct genl_info *info)
-{
- return &init_net;
-}
-
-static inline void genl_info_net_set(struct genl_info *info, struct net *net)
-{
-}
-#endif
/**
* struct genl_ops - generic netlink operations
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1653de515cee..1989cfd7405f 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -137,7 +137,8 @@ struct inet_sock {
hdrincl:1,
mc_loop:1,
transparent:1,
- mc_all:1;
+ mc_all:1,
+ nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist *mc_list;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 87b1df0d4d8c..417d0c894f29 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -22,10 +22,21 @@ struct inet_peer {
__u32 dtime; /* the time of last use of not
* referenced entries */
atomic_t refcnt;
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+ * We can share memory with rcu_head to keep inet_peer small
+ * (less than 64 bytes)
+ */
+ union {
+ struct {
+ atomic_t rid; /* Frag reception counter */
+ atomic_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ };
+ struct rcu_head rcu;
+ };
};
void inet_initpeers(void) __init;
@@ -36,10 +47,21 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create);
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
+/*
+ * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+ */
+static inline void inet_peer_refcheck(const struct inet_peer *p)
+{
+ WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+}
+
/* can be called with or without local BH being disabled */
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
more++;
+ inet_peer_refcheck(p);
return atomic_add_return(more, &p->ip_id_count) - more;
}
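
Because rid/ip_id_count/tcp_ts now share storage with the RCU head, they may only be touched while a reference is held; inet_peer_refcheck() is a debugging aid for that rule. A minimal sketch of a compliant caller (the function name and the local use of the id are illustrative only):

static u16 example_next_ip_id(__be32 daddr)
{
	struct inet_peer *peer;
	u16 id = 0;

	peer = inet_getpeer(daddr, 1);	/* create if missing, takes a reference */
	if (peer) {
		/* safe: holding a reference guarantees the union still holds
		 * ip_id_count rather than the rcu_head
		 */
		id = inet_getid(peer, 0);
		inet_putpeer(peer);
	}
	return id;
}
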
diff --git a/include/net/ip.h b/include/net/ip.h
index 452f229c380a..d52f01180361 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -61,11 +61,14 @@ struct ipcm_cookie {
struct ip_ra_chain {
struct ip_ra_chain *next;
struct sock *sk;
- void (*destructor)(struct sock *);
+ union {
+ void (*destructor)(struct sock *);
+ struct sock *saved_sk;
+ };
+ struct rcu_head rcu;
};
extern struct ip_ra_chain *ip_ra_chain;
-extern rwlock_t ip_ra_lock;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 4b1dc1161c37..062a823d311c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -84,13 +84,11 @@ struct rt6key {
struct fib6_table;
struct rt6_info {
- union {
- struct dst_entry dst;
- } u;
+ struct dst_entry dst;
-#define rt6i_dev u.dst.dev
-#define rt6i_nexthop u.dst.neighbour
-#define rt6i_expires u.dst.expires
+#define rt6i_dev dst.dev
+#define rt6i_nexthop dst.neighbour
+#define rt6i_expires dst.expires
/*
* Tail elements of dst_entry (__refcnt etc.)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fbf9d1cda27b..fc94ec568a50 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -27,6 +27,6 @@ struct ipv6_tlv_tnl_enc_lim {
__u8 type; /* type-code for option */
__u8 length; /* option length */
__u8 encap_limit; /* tunnel encapsulation limit */
-} __attribute__ ((packed));
+} __packed;
#endif
diff --git a/include/net/ipip.h b/include/net/ipip.h
index 11e8513d2d07..65caea8b414f 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry {
int pkt_len = skb->len - skb_transport_offset(skb); \
\
skb->ip_summed = CHECKSUM_NONE; \
- ip_select_ident(iph, &rt->u.dst, NULL); \
+ ip_select_ident(iph, &rt->dst, NULL); \
\
err = ip_local_out(skb); \
if (likely(net_xmit_eval(err) == 0)) { \
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2600b69757b8..f5808d596aab 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -551,6 +551,10 @@ extern int ipv6_ext_hdr(u8 nexthdr);
extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+extern struct in6_addr *fl6_update_dst(struct flowi *fl,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig);
+
/*
* socket options (ipv6_sockglue.c)
*/
diff --git a/include/net/ipx.h b/include/net/ipx.h
index ef51a668ba19..05d7e4a88b49 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -27,9 +27,9 @@ struct ipx_address {
#define IPX_MAX_PPROP_HOPS 8
struct ipxhdr {
- __be16 ipx_checksum __attribute__ ((packed));
+ __be16 ipx_checksum __packed;
#define IPX_NO_CHECKSUM cpu_to_be16(0xFFFF)
- __be16 ipx_pktsize __attribute__ ((packed));
+ __be16 ipx_pktsize __packed;
__u8 ipx_tctrl;
__u8 ipx_type;
#define IPX_TYPE_UNKNOWN 0x00
@@ -38,8 +38,8 @@ struct ipxhdr {
#define IPX_TYPE_SPX 0x05 /* SPX protocol */
#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast */
- struct ipx_address ipx_dest __attribute__ ((packed));
- struct ipx_address ipx_source __attribute__ ((packed));
+ struct ipx_address ipx_dest __packed;
+ struct ipx_address ipx_source __packed;
};
static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index de22cbfef232..fe1a3a603375 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -146,6 +146,7 @@ struct ieee80211_low_level_stats {
* enabled/disabled (beaconing modes)
* @BSS_CHANGED_CQM: Connection quality monitor config changed
* @BSS_CHANGED_IBSS: IBSS join status changed
+ * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -160,10 +161,18 @@ enum ieee80211_bss_change {
BSS_CHANGED_BEACON_ENABLED = 1<<9,
BSS_CHANGED_CQM = 1<<10,
BSS_CHANGED_IBSS = 1<<11,
+ BSS_CHANGED_ARP_FILTER = 1<<12,
/* when adding here, make sure to change ieee80211_reconfig */
};
+/*
+ * The maximum number of IPv4 addresses listed for ARP filtering. If the number
+ * of addresses for an interface increases beyond this value, hardware ARP
+ * filtering will be disabled.
+ */
+#define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4
+
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -199,6 +208,15 @@ enum ieee80211_bss_change {
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
* implies disabled
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
+ * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
+ * hardware may filter ARP queries targeted for other addresses than listed
+ * here. The driver must allow ARP queries targeted for all addresses listed
+ * here to pass through. An empty list implies no ARP queries need to pass.
+ * @arp_addr_cnt: Number of addresses currently on the list.
+ * @arp_filter_enabled: Enable ARP filtering - if enabled, the hardware may
+ * filter ARP queries based on the @arp_addr_list; if disabled, the
+ * hardware must not perform any ARP filtering. Note that the filter will
+ * be enabled also in promiscuous mode.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -219,6 +237,9 @@ struct ieee80211_bss_conf {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
enum nl80211_channel_type channel_type;
+ __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ u8 arp_addr_cnt;
+ bool arp_filter_enabled;
};
/**
@@ -312,9 +333,10 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_INTFL_NL80211_FRAME_TX = BIT(21),
IEEE80211_TX_CTL_LDPC = BIT(22),
IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
-#define IEEE80211_TX_CTL_STBC_SHIFT 23
};
+#define IEEE80211_TX_CTL_STBC_SHIFT 23
+
/**
* enum mac80211_rate_control_flags - per-rate flags set by the
* Rate Control algorithm.
@@ -412,8 +434,6 @@ struct ieee80211_tx_rate {
* @driver_data: array of driver_data pointers
* @ampdu_ack_len: number of acked aggregated frames.
* relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ampdu_ack_map: block ack bit map for the aggregation.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
* @ampdu_len: number of aggregated frames.
* relevant only if IEEE80211_TX_STAT_AMPDU was set.
* @ack_signal: signal strength of the ACK frame
@@ -448,10 +468,9 @@ struct ieee80211_tx_info {
struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
u8 ampdu_ack_len;
- u64 ampdu_ack_map;
int ack_signal;
u8 ampdu_len;
- /* 7 bytes free */
+ /* 15 bytes free */
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -676,9 +695,6 @@ enum ieee80211_smps_mode {
* @dynamic_ps_timeout: The dynamic powersave timeout (in ms), see the
* powersave documentation below. This variable is valid only when
* the CONF_PS flag is set.
- * @dynamic_ps_forced_timeout: The dynamic powersave timeout (in ms) configured
- * by cfg80211 (essentially, wext) If set, this value overrules the value
- * chosen by mac80211 based on ps qos network latency.
*
* @power_level: requested transmit power (in dBm)
*
@@ -698,7 +714,7 @@ enum ieee80211_smps_mode {
*/
struct ieee80211_conf {
u32 flags;
- int power_level, dynamic_ps_timeout, dynamic_ps_forced_timeout;
+ int power_level, dynamic_ps_timeout;
int max_sleep_period;
u16 listen_interval;
@@ -815,7 +831,6 @@ enum ieee80211_key_flags {
* encrypted in hardware.
* @alg: The key algorithm.
* @flags: key flags, see &enum ieee80211_key_flags.
- * @ap_addr: AP's MAC address
* @keyidx: the key index (0-3)
* @keylen: key material length
* @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte)
@@ -881,16 +896,12 @@ struct ieee80211_sta {
* enum sta_notify_cmd - sta notify command
*
* Used with the sta_notify() callback in &struct ieee80211_ops, this
- * indicates addition and removal of a station to station table,
- * or if a associated station made a power state transition.
+ * indicates if an associated station made a power state transition.
*
- * @STA_NOTIFY_ADD: (DEPRECATED) a station was added to the station table
- * @STA_NOTIFY_REMOVE: (DEPRECATED) a station being removed from the station table
* @STA_NOTIFY_SLEEP: a station is now sleeping
* @STA_NOTIFY_AWAKE: a sleeping station woke up
*/
enum sta_notify_cmd {
- STA_NOTIFY_ADD, STA_NOTIFY_REMOVE,
STA_NOTIFY_SLEEP, STA_NOTIFY_AWAKE,
};
@@ -1451,7 +1462,7 @@ enum ieee80211_filter_flags {
*
* Note that drivers MUST be able to deal with a TX aggregation
* session being stopped even before they OK'ed starting it by
- * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
+ * calling ieee80211_start_tx_ba_cb_irqsafe, because the peer
* might receive the addBA frame and send a delBA right away!
*
* @IEEE80211_AMPDU_RX_START: start Rx aggregation
@@ -1636,7 +1647,7 @@ enum ieee80211_ampdu_mlme_action {
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
* Returns a negative error code on failure.
- * The callback must be atomic.
+ * The callback can sleep.
*
* @get_survey: Return per-channel survey information
*
@@ -2307,25 +2318,14 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
- * ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- */
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-
-/**
* ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
* @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session. It can be called
+ * from any context.
*/
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
@@ -2334,27 +2334,14 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
* ieee80211_stop_tx_ba_session - Stop a Block Ack session.
* @sta: the station whose BA session to stop
* @tid: the TID to stop BA.
- * @initiator: if indicates initiator DELBA frame will be sent.
*
- * Return: error if no sta with matching da found, success otherwise
+ * Return: negative error if the TID is invalid, or if no aggregation is active
*
* Although mac80211/low level driver/user space application can estimate
* the need to stop aggregation on a certain RA/TID, the session level
* will be managed by the mac80211.
*/
-int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
- enum ieee80211_back_parties initiator);
-
-/**
- * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from the add_interface callback
- * @ra: receiver address of the BA session recipient.
- * @tid: the desired TID to BA on.
- *
- * This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- */
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
* ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
@@ -2363,8 +2350,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
* @tid: the desired TID to BA on.
*
* This function must be called by low level driver once it has
- * finished with preparations for the BA session tear down.
- * This version of the function is IRQ-safe.
+ * finished with preparations for the BA session tear down. It
+ * can be called from any context.
*/
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, const u8 *ra,
u16 tid);
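
A driver that advertises ARP filtering would typically pick the addresses up in its bss_info_changed() callback when BSS_CHANGED_ARP_FILTER is signalled. A hypothetical sketch follows; my_priv, my_write_arp_table and my_enable_arp_filter are placeholder driver internals, and the actual programming of the filter is hardware specific:

static void my_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *info,
				u32 changed)
{
	struct my_priv *priv = hw->priv;
	int i;

	if (!(changed & BSS_CHANGED_ARP_FILTER))
		return;

	if (info->arp_filter_enabled) {
		for (i = 0; i < info->arp_addr_cnt; i++)
			my_write_arp_table(priv, i, info->arp_addr_list[i]);
		my_enable_arp_filter(priv, info->arp_addr_cnt);
	} else {
		my_enable_arp_filter(priv, 0);	/* let all ARP frames through */
	}
}
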
diff --git a/include/net/mip6.h b/include/net/mip6.h
index a83ad1982a90..26ba99b5a4b1 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -39,7 +39,7 @@ struct ip6_mh {
__u16 ip6mh_cksum;
/* Followed by type specific messages */
__u8 data[0];
-} __attribute__ ((__packed__));
+} __packed;
#define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */
#define IP6_MH_TYPE_HOTI 1 /* HOTI Message */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index f76f22d05721..895997bc2ead 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -82,7 +82,7 @@ struct ra_msg {
struct nd_opt_hdr {
__u8 nd_opt_type;
__u8 nd_opt_len;
-} __attribute__((__packed__));
+} __packed;
extern int ndisc_init(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index bde095f7e845..e624dae54fa4 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -152,11 +152,7 @@ extern struct net init_net;
static inline struct net *nf_ct_net(const struct nf_conn *ct)
{
-#ifdef CONFIG_NET_NS
- return ct->ct_net;
-#else
- return &init_net;
-#endif
+ return read_pnet(&ct->ct_net);
}
/* Alter reply tuple (maybe alter helper). */
@@ -261,7 +257,12 @@ extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
u32 seq);
/* Fake conntrack entry for untracked connections */
-extern struct nf_conn nf_conntrack_untracked;
+DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+static inline struct nf_conn *nf_ct_untracked_get(void)
+{
+ return &__raw_get_cpu_var(nf_conntrack_untracked);
+}
+extern void nf_ct_untracked_status_or(unsigned long bits);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
@@ -289,9 +290,9 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
return test_bit(IPS_DYING_BIT, &ct->status);
}
-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+static inline int nf_ct_is_untracked(const struct nf_conn *ct)
{
- return (skb->nfct == &nf_conntrack_untracked.ct_general);
+ return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}
extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
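
Callers that used to compare skb->nfct against the single global untracked entry now test the per-conntrack status bit, which also works with the new per-cpu untracked entries. A minimal sketch of the updated call pattern, mirroring the nf_conntrack_confirm() change in the next hunk (the helper name is illustrative):

static bool example_skb_is_tracked(const struct sk_buff *skb)
{
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;

	/* old: ct != &nf_conntrack_untracked
	 * new: check IPS_UNTRACKED_BIT on the conntrack itself
	 */
	return ct && !nf_ct_is_untracked(ct);
}
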
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3d7524fba194..aced085132e7 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conn *ct = (struct nf_conn *)skb->nfct;
int ret = NF_ACCEPT;
- if (ct && ct != &nf_conntrack_untracked) {
+ if (ct && !nf_ct_is_untracked(ct)) {
if (!nf_ct_is_confirmed(ct))
ret = __nf_conntrack_confirm(skb);
if (likely(ret == NF_ACCEPT))
diff --git a/include/net/netfilter/xt_rateest.h b/include/net/netfilter/xt_rateest.h
index ddbf37e19616..5a2978d1cb22 100644
--- a/include/net/netfilter/xt_rateest.h
+++ b/include/net/netfilter/xt_rateest.h
@@ -2,13 +2,18 @@
#define _XT_RATEEST_H
struct xt_rateest {
+ /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
+ struct gnet_stats_basic_packed bstats;
+ spinlock_t lock;
+ /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
+ struct gnet_stats_rate_est rstats;
+
+ /* following fields not accessed in hot path */
struct hlist_node list;
char name[IFNAMSIZ];
unsigned int refcnt;
- spinlock_t lock;
struct gnet_estimator params;
- struct gnet_stats_rate_est rstats;
- struct gnet_stats_basic_packed bstats;
+ struct rcu_head rcu;
};
extern struct xt_rateest *xt_rateest_lookup(const char *name);
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4fc05b58503e..f3b201d335b3 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -35,7 +35,7 @@
* nlmsg_new() create a new netlink message
* nlmsg_put() add a netlink message to an skb
* nlmsg_put_answer() callback based nlmsg_put()
- * nlmsg_end() finanlize netlink message
+ * nlmsg_end() finalize netlink message
* nlmsg_get_pos() return current position in message
* nlmsg_trim() trim part of message
* nlmsg_cancel() cancel message construction
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index d7b989ca3d63..2d16783d5e20 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -34,6 +34,7 @@ struct phonet_device {
struct list_head list;
struct net_device *netdev;
DECLARE_BITMAP(addrs, 64);
+ struct rcu_head rcu;
};
int phonet_device_init(void);
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 9d4d87cc970e..d9549af6929a 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -95,7 +95,7 @@ extern void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
+ if (qdisc_run_begin(q))
__qdisc_run(q);
}
diff --git a/include/net/route.h b/include/net/route.h
index af6cf4b4c9dc..bd732d62e1c3 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -50,9 +50,7 @@
struct fib_nh;
struct inet_peer;
struct rtable {
- union {
- struct dst_entry dst;
- } u;
+ struct dst_entry dst;
/* Cache lookup keys */
struct flowi fl;
@@ -144,7 +142,7 @@ extern void fib_add_ifaddr(struct in_ifaddr *);
static inline void ip_rt_put(struct rtable * rt)
{
if (rt)
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
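
With the anonymous union gone, every user of the old rt->u.dst spelling becomes rt->dst, as the ipip.h and clip.c hunks elsewhere in this series already show. A minimal sketch of the change as seen from an output-path caller (function name is illustrative; the flowi setup is assumed to be done by the caller):

static int example_set_route(struct sk_buff *skb, struct flowi *fl)
{
	struct rtable *rt;
	int err;

	err = ip_route_output_key(&init_net, &rt, fl);
	if (err)
		return err;

	/* old spelling was skb_dst_set(skb, &rt->u.dst); */
	skb_dst_set(skb, &rt->dst);
	return 0;
}
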
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 03ca5d826757..b35301b0c7b6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -23,11 +23,17 @@ struct qdisc_rate_table {
};
enum qdisc_state_t {
- __QDISC_STATE_RUNNING,
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
};
+/*
+ * following bits are only changed while qdisc lock is held
+ */
+enum qdisc___state_t {
+ __QDISC___STATE_RUNNING,
+};
+
struct qdisc_size_table {
struct list_head list;
struct tc_sizespec szopts;
@@ -72,10 +78,27 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
+ unsigned long __state;
struct gnet_stats_queue qstats;
- struct rcu_head rcu_head;
+ struct rcu_head rcu_head;
+ spinlock_t busylock;
};
+static inline bool qdisc_is_running(struct Qdisc *qdisc)
+{
+ return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
+
+static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+{
+ return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
+
+static inline void qdisc_run_end(struct Qdisc *qdisc)
+{
+ __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+}
+
struct Qdisc_class_ops {
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
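
The RUNNING bit moves into a word that is only modified with the qdisc lock held, so the cheaper non-atomic bitops suffice. The canonical user is the qdisc_run() wrapper in the pkt_sched.h hunk above; spelled out, the guarded section looks roughly like this sketch (not the real __qdisc_run() loop):

static void example_run_once(struct Qdisc *q)
{
	/* only one cpu may own the RUNNING state at a time; since __state is
	 * only touched under the qdisc lock, __test_and_set_bit/__clear_bit
	 * are enough
	 */
	if (qdisc_run_begin(q)) {
		/* ... dequeue and transmit packets here ... */
		qdisc_run_end(q);
	}
}
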
diff --git a/include/net/scm.h b/include/net/scm.h
index 8360e47aa7e3..31656506d967 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -19,8 +19,10 @@ struct scm_fp_list {
};
struct scm_cookie {
- struct ucred creds; /* Skb credentials */
+ struct pid *pid; /* Skb credentials */
+ const struct cred *cred;
struct scm_fp_list *fp; /* Passed files */
+ struct ucred creds; /* Skb credentials */
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Passed security ID */
#endif
@@ -42,8 +44,27 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co
{ }
#endif /* CONFIG_SECURITY_NETWORK */
+static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ struct pid *pid, const struct cred *cred)
+{
+ scm->pid = get_pid(pid);
+ scm->cred = get_cred(cred);
+ cred_to_ucred(pid, cred, &scm->creds);
+}
+
+static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+{
+ put_pid(scm->pid);
+ scm->pid = NULL;
+
+ if (scm->cred)
+ put_cred(scm->cred);
+ scm->cred = NULL;
+}
+
static __inline__ void scm_destroy(struct scm_cookie *scm)
{
+ scm_destroy_cred(scm);
if (scm && scm->fp)
__scm_destroy(scm);
}
@@ -51,10 +72,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
struct scm_cookie *scm)
{
- struct task_struct *p = current;
- scm->creds.uid = current_uid();
- scm->creds.gid = current_gid();
- scm->creds.pid = task_tgid_vnr(p);
+ scm_set_cred(scm, task_tgid(current), current_cred());
scm->fp = NULL;
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
@@ -96,6 +114,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
if (test_bit(SOCK_PASSCRED, &sock->flags))
put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
+ scm_destroy_cred(scm);
+
scm_passec(sock, msg, scm);
if (!scm->fp)
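
The pid/cred references taken by scm_set_cred() must always be dropped again, either by scm_recv() on delivery or by scm_destroy() on the sender side. A minimal sketch of the send-side pairing; example_sendmsg and example_queue_data are placeholders for a real protocol's sendmsg path:

static int example_sendmsg(struct socket *sock, struct msghdr *msg)
{
	struct scm_cookie scm;
	int err;

	err = scm_send(sock, msg, &scm);	/* grabs pid + cred references */
	if (err < 0)
		return err;

	err = example_queue_data(sock, msg, &scm);	/* placeholder */

	scm_destroy(&scm);	/* drops the references via scm_destroy_cred() */
	return err;
}
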
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4b860116e096..f9e7473613bd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -443,7 +443,7 @@ struct sctp_signed_cookie {
__u8 signature[SCTP_SECRET_SIZE];
__u32 __pad; /* force sctp_cookie alignment to 64 bits */
struct sctp_cookie c;
-} __attribute__((packed));
+} __packed;
/* This is another convenience type to allocate memory for address
* params for the maximum size and pass such structures around
@@ -488,7 +488,7 @@ typedef struct sctp_sender_hb_info {
union sctp_addr daddr;
unsigned long sent_at;
__u64 hb_nonce;
-} __attribute__((packed)) sctp_sender_hb_info_t;
+} __packed sctp_sender_hb_info_t;
/*
* RFC 2960 1.3.2 Sequenced Delivery within Streams
diff --git a/include/net/sock.h b/include/net/sock.h
index 731150d52799..4f26f2f83be9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -295,7 +295,8 @@ struct sock {
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
- struct ucred sk_peercred;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
@@ -1711,19 +1712,13 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
static inline
struct net *sock_net(const struct sock *sk)
{
-#ifdef CONFIG_NET_NS
- return sk->sk_net;
-#else
- return &init_net;
-#endif
+ return read_pnet(&sk->sk_net);
}
static inline
void sock_net_set(struct sock *sk, struct net *net)
{
-#ifdef CONFIG_NET_NS
- sk->sk_net = net;
-#endif
+ write_pnet(&sk->sk_net, net);
}
/*
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a1449144848a..18c246c9b009 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,7 +464,7 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);
+extern bool cookie_check_timestamp(struct tcp_options_received *tcp_opt);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
@@ -602,6 +602,17 @@ extern u32 __tcp_select_window(struct sock *sk);
*/
#define tcp_time_stamp ((__u32)(jiffies))
+#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission
* code. We also store the host-order sequence numbers in
@@ -620,19 +631,6 @@ struct tcp_skb_cb {
__u32 end_seq; /* SEQ + FIN + SYN + datalen */
__u32 when; /* used to compute rtt's */
__u8 flags; /* TCP header flags. */
-
- /* NOTE: These must match up to the flags byte in a
- * real TCP header.
- */
-#define TCPCB_FLAG_FIN 0x01
-#define TCPCB_FLAG_SYN 0x02
-#define TCPCB_FLAG_RST 0x04
-#define TCPCB_FLAG_PSH 0x08
-#define TCPCB_FLAG_ACK 0x10
-#define TCPCB_FLAG_URG 0x20
-#define TCPCB_FLAG_ECE 0x40
-#define TCPCB_FLAG_CWR 0x80
-
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
@@ -1413,7 +1411,8 @@ struct tcp_iter_state {
sa_family_t family;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
- int bucket, sbucket, num, uid;
+ int bucket, offset, sbucket, num, uid;
+ loff_t last_pos;
};
extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
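
Moving the flag values out of tcp_skb_cb and renaming them TCPHDR_* makes them usable both against a real TCP header and against the control-block flags byte, since the values match the on-the-wire bits. A small sketch of both uses (function names are illustrative; th points at a parsed struct tcphdr):

static bool example_is_synack(const struct tcphdr *th)
{
	/* tcp_flag_byte() reads byte 13 of the header, i.e. the flag bits */
	return (tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) ==
	       (TCPHDR_SYN | TCPHDR_ACK);
}

static bool example_cb_carries_fin(struct sk_buff *skb)
{
	/* the same bit values work on the per-packet control block */
	return TCP_SKB_CB(skb)->flags & TCPHDR_FIN;
}
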
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b69e6e173ea1..9b2c30897e50 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -65,7 +65,7 @@ struct rxrpc_header {
};
__be16 serviceId; /* service ID */
-} __attribute__((packed));
+} __packed;
#define __rxrpc_header_off(X) offsetof(struct rxrpc_header,X)
@@ -120,7 +120,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_TYPE_NACK 0
#define RXRPC_ACK_TYPE_ACK 1
-} __attribute__((packed));
+} __packed;
/*
* ACK packets can have a further piece of information tagged on the end
@@ -141,7 +141,7 @@ struct rxkad_challenge {
__be32 nonce; /* encrypted random number */
__be32 min_level; /* minimum security level */
__be32 __padding; /* padding to 8-byte boundary */
-} __attribute__((packed));
+} __packed;
/*****************************************************************************/
/*
@@ -164,7 +164,7 @@ struct rxkad_response {
__be32 kvno; /* Kerberos key version number */
__be32 ticket_len; /* Kerberos ticket length */
-} __attribute__((packed));
+} __packed;
/*****************************************************************************/
/*
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index b2d70d38dff4..25915832291a 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -9,6 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/highuid.h>
#include <linux/cred.h>
/*
@@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref)
schedule_work(&ns->destroyer);
}
EXPORT_SYMBOL(free_user_ns);
+
+uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid)
+{
+ struct user_namespace *tmp;
+
+ if (likely(to == cred->user->user_ns))
+ return uid;
+
+ /* Is cred->user the creator of the target user_ns
+ * or the creator of one of its parents?
+ */
+ for ( tmp = to; tmp != &init_user_ns;
+ tmp = tmp->creator->user_ns ) {
+ if (cred->user == tmp->creator) {
+ return (uid_t)0;
+ }
+ }
+
+ /* No useful relationship so no mapping */
+ return overflowuid;
+}
+
+gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid)
+{
+ struct user_namespace *tmp;
+
+ if (likely(to == cred->user->user_ns))
+ return gid;
+
+ /* Is cred->user the creator of the target user_ns
+ * or the creator of one of its parents?
+ */
+ for ( tmp = to; tmp != &init_user_ns;
+ tmp = tmp->creator->user_ns ) {
+ if (cred->user == tmp->creator) {
+ return (gid_t)0;
+ }
+ }
+
+ /* No useful relationship so no mapping */
+ return overflowgid;
+}
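
These helpers answer "what should this uid/gid look like from the point of view of namespace 'to'?" and are intended for reporting paths (the SCM_CREDENTIALS rework in this series is one consumer). A minimal, hypothetical sketch of a reporting call site; the function name and the viewer_ns parameter are illustrative only:

static void example_fill_ucred(struct ucred *out, struct pid *pid,
			       const struct cred *cred,
			       struct user_namespace *viewer_ns)
{
	out->pid = pid_vnr(pid);
	/* same namespace: identity mapping; creator of viewer_ns (or of one
	 * of its parents): root; otherwise: the overflow ids
	 */
	out->uid = user_ns_map_uid(viewer_ns, cred, cred->euid);
	out->gid = user_ns_map_gid(viewer_ns, cred, cred->egid);
}
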
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index afead353e215..df56f5ce887c 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -278,8 +278,9 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
struct net_device *vlandev = (struct net_device *) seq->private;
const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
static const char fmt[] = "%30s %12lu\n";
+ static const char fmt64[] = "%30s %12llu\n";
int i;
if (!is_vlan_dev(vlandev))
@@ -291,12 +292,12 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
vlandev->name, dev_info->vlan_id,
(int)(dev_info->flags & 1), vlandev->priv_flags);
- seq_printf(seq, fmt, "total frames received", stats->rx_packets);
- seq_printf(seq, fmt, "total bytes received", stats->rx_bytes);
- seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast);
+ seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
+ seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
+ seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
seq_puts(seq, "\n");
- seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets);
- seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes);
+ seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
+ seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
seq_printf(seq, fmt, "total headroom inc",
dev_info->cnt_inc_headroom_on_tx);
seq_printf(seq, fmt, "total encap on xmit",
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 313aba11316b..95fdd1185067 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -522,7 +522,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
error = ip_route_output_key(&init_net, &rt, &fl);
if (error)
return error;
- neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1);
+ neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
ip_rt_put(rt);
if (!neigh)
return -ENOMEM;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 0d9e506f5d5a..70672544db86 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -86,26 +86,26 @@ struct bnep_setup_conn_req {
__u8 ctrl;
__u8 uuid_size;
__u8 service[0];
-} __attribute__((packed));
+} __packed;
struct bnep_set_filter_req {
__u8 type;
__u8 ctrl;
__be16 len;
__u8 list[0];
-} __attribute__((packed));
+} __packed;
struct bnep_control_rsp {
__u8 type;
__u8 ctrl;
__be16 resp;
-} __attribute__((packed));
+} __packed;
struct bnep_ext_hdr {
__u8 type;
__u8 len;
__u8 data[0];
-} __attribute__((packed));
+} __packed;
/* BNEP ioctl defines */
#define BNEPCONNADD _IOW('B', 200, int)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 76357b547752..c8436fa31344 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -63,7 +63,6 @@ static int __init br_init(void)
goto err_out4;
brioctl_set(br_ioctl_deviceless_stub);
- br_handle_frame_hook = br_handle_frame;
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
br_fdb_test_addr_hook = br_fdb_test_addr;
@@ -100,7 +99,6 @@ static void __exit br_deinit(void)
br_fdb_test_addr_hook = NULL;
#endif
- br_handle_frame_hook = NULL;
br_fdb_fini();
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..edf639e96281 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -38,8 +38,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
+ u64_stats_update_begin(&brstats->syncp);
brstats->tx_packets++;
brstats->tx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
BR_INPUT_SKB_CB(skb)->brdev = dev;
@@ -47,6 +49,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb, ETH_HLEN);
if (is_multicast_ether_addr(dest)) {
+ if (unlikely(netpoll_tx_running(dev))) {
+ br_flood_deliver(br, skb);
+ goto out;
+ }
if (br_multicast_rcv(br, NULL, skb))
goto out;
@@ -92,21 +98,25 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
-static struct net_device_stats *br_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct br_cpu_netstats sum = { 0 };
+ struct rtnl_link_stats64 *stats = &dev->stats64;
+ struct br_cpu_netstats tmp, sum = { 0 };
unsigned int cpu;
for_each_possible_cpu(cpu) {
+ unsigned int start;
const struct br_cpu_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
-
- sum.tx_bytes += bstats->tx_bytes;
- sum.tx_packets += bstats->tx_packets;
- sum.rx_bytes += bstats->rx_bytes;
- sum.rx_packets += bstats->rx_packets;
+ do {
+ start = u64_stats_fetch_begin(&bstats->syncp);
+ memcpy(&tmp, bstats, sizeof(tmp));
+ } while (u64_stats_fetch_retry(&bstats->syncp, start));
+ sum.tx_bytes += tmp.tx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.rx_packets += tmp.rx_packets;
}
stats->tx_bytes = sum.tx_bytes;
@@ -127,7 +137,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
#ifdef CONFIG_BRIDGE_NETFILTER
/* remember the MTU in the rtable for PMTU */
- br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu;
+ br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
#endif
return 0;
@@ -199,73 +209,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static bool br_devices_support_netpoll(struct net_bridge *br)
+static void br_poll_controller(struct net_device *br_dev)
{
- struct net_bridge_port *p;
- bool ret = true;
- int count = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&br->lock, flags);
- list_for_each_entry(p, &br->port_list, list) {
- count++;
- if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !p->dev->netdev_ops->ndo_poll_controller)
- ret = false;
- }
- spin_unlock_irqrestore(&br->lock, flags);
- return count != 0 && ret;
}
-static void br_poll_controller(struct net_device *br_dev)
+static void br_netpoll_cleanup(struct net_device *dev)
{
- struct netpoll *np = br_dev->npinfo->netpoll;
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *p, *n;
- if (np->real_dev != br_dev)
- netpoll_poll_dev(np->real_dev);
+ list_for_each_entry_safe(p, n, &br->port_list, list) {
+ br_netpoll_disable(p);
+ }
}
-void br_netpoll_cleanup(struct net_device *dev)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
- const struct net_device_ops *ops;
+ int err = 0;
- br->dev->npinfo = NULL;
list_for_each_entry_safe(p, n, &br->port_list, list) {
- if (p->dev) {
- ops = p->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(p->dev);
- else
- p->dev->npinfo = NULL;
- }
+ if (!p->dev)
+ continue;
+
+ err = br_netpoll_enable(p);
+ if (err)
+ goto fail;
}
+
+out:
+ return err;
+
+fail:
+ br_netpoll_cleanup(dev);
+ goto out;
}
-void br_netpoll_disable(struct net_bridge *br,
- struct net_device *dev)
+int br_netpoll_enable(struct net_bridge_port *p)
{
- if (br_devices_support_netpoll(br))
- br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (dev->netdev_ops->ndo_netpoll_cleanup)
- dev->netdev_ops->ndo_netpoll_cleanup(dev);
- else
- dev->npinfo = NULL;
+ struct netpoll *np;
+ int err = 0;
+
+ np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np)
+ goto out;
+
+ np->dev = p->dev;
+
+ err = __netpoll_setup(np);
+ if (err) {
+ kfree(np);
+ goto out;
+ }
+
+ p->np = np;
+
+out:
+ return err;
}
-void br_netpoll_enable(struct net_bridge *br,
- struct net_device *dev)
+void br_netpoll_disable(struct net_bridge_port *p)
{
- if (br_devices_support_netpoll(br)) {
- br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (br->dev->npinfo)
- dev->npinfo = br->dev->npinfo;
- } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
- br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
- br_info(br,"new device %s does not support netpoll (disabling)",
- dev->name);
- }
+ struct netpoll *np = p->np;
+
+ if (!np)
+ return;
+
+ p->np = NULL;
+
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
+
+ __netpoll_cleanup(np);
+ kfree(np);
}
#endif
@@ -288,12 +306,13 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_start_xmit = br_dev_xmit,
- .ndo_get_stats = br_get_stats,
+ .ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_multicast_list = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = br_netpoll_setup,
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
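The statistics changes above follow the u64_stats_sync writer/reader pattern; below is a minimal, self-contained sketch of that pattern under illustrative names (demo_stats, demo_stats_add, demo_stats_read), not tied to the bridge code.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Hot-path writer: bump the local CPU's counters inside the sync section. */
static void demo_stats_add(struct demo_stats __percpu *stats, unsigned int len)
{
	struct demo_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry each per-CPU snapshot until it is consistent, then sum. */
static void demo_stats_read(struct demo_stats __percpu *stats,
			    u64 *packets, u64 *bytes)
{
	unsigned int cpu;

	*packets = *bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct demo_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			p = s->packets;
			b = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		*packets += p;
		*bytes += b;
	}
}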
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index b01dde35a69e..a744296fc675 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -240,11 +240,11 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
struct net_bridge_fdb_entry *fdb;
int ret;
- if (!dev->br_port)
+ if (!br_port_exists(dev))
return 0;
rcu_read_lock();
- fdb = __br_fdb_get(dev->br_port->br, addr);
+ fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
ret = fdb && fdb->dst->dev != dev &&
fdb->dst->state == BR_STATE_FORWARDING;
rcu_read_unlock();
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a4e72a89e4ff..cbfe87f0f34a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
- netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
- skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
- } else
-#endif
- dev_queue_xmit(skb);
+ dev_queue_xmit(skb);
}
}
@@ -73,23 +66,20 @@ int br_forward_finish(struct sk_buff *skb)
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct net_bridge *br = to->br;
- if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
- struct netpoll *np;
- to->dev->npinfo = skb->dev->npinfo;
- np = skb->dev->npinfo->netpoll;
- np->real_dev = np->dev = to->dev;
- to->dev->priv_flags |= IFF_IN_NETPOLL;
- }
-#endif
skb->dev = to->dev;
+
+ if (unlikely(netpoll_tx_running(to->dev))) {
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+ kfree_skb(skb);
+ else {
+ skb_push(skb, ETH_HLEN);
+ br_netpoll_send_skb(to, skb);
+ }
+ return;
+ }
+
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (skb->dev->npinfo)
- skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 18b245e2c00e..c03d2c3ff03e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,14 +147,17 @@ static void del_nbp(struct net_bridge_port *p)
list_del_rcu(&p->list);
- rcu_assign_pointer(dev->br_port, NULL);
+ dev->priv_flags &= ~IFF_BRIDGE_PORT;
+
+ netdev_rx_handler_unregister(dev);
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
- br_netpoll_disable(br, dev);
+ br_netpoll_disable(p);
+
call_rcu(&p->rcu, destroy_nbp_rcu);
}
@@ -167,8 +170,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
del_nbp(p);
}
- br_netpoll_cleanup(br->dev);
-
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
@@ -400,7 +401,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return -ELOOP;
/* Device is already being bridged */
- if (dev->br_port != NULL)
+ if (br_port_exists(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
@@ -428,7 +429,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- rcu_assign_pointer(dev->br_port, p);
+ if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+ goto err3;
+
+ err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ if (err)
+ goto err3;
+
+ dev->priv_flags |= IFF_BRIDGE_PORT;
+
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
@@ -448,9 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
- br_netpoll_enable(br, dev);
-
return 0;
+err3:
+ sysfs_remove_link(br->ifobj, p->dev->name);
err2:
br_fdb_delete_by_port(br, p, 1);
err1:
@@ -467,9 +476,13 @@ put_back:
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
- struct net_bridge_port *p = dev->br_port;
+ struct net_bridge_port *p;
+
+ if (!br_port_exists(dev))
+ return -EINVAL;
- if (!p || p->br != br)
+ p = br_port_get(dev);
+ if (p->br != br)
return -EINVAL;
del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d36e700f7a26..5fc1c5b1c360 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -27,8 +27,10 @@ static int br_pass_frame_up(struct sk_buff *skb)
struct net_bridge *br = netdev_priv(brdev);
struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+ u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
brstats->rx_bytes += skb->len;
+ u64_stats_update_end(&brstats->syncp);
indev = skb->dev;
skb->dev = brdev;
@@ -41,7 +43,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
- struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
@@ -111,10 +113,9 @@ drop:
/* note: already called with rcu_read_lock (preempt_disabled) */
static int br_handle_local_finish(struct sk_buff *skb)
{
- struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge_port *p = br_port_get_rcu(skb->dev);
- if (p)
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
return 0; /* process further */
}
@@ -131,15 +132,19 @@ static inline int is_link_local(const unsigned char *dest)
}
/*
- * Called via br_handle_frame_hook.
* Return NULL if skb is handled
- * note: already called with rcu_read_lock (preempt_disabled)
+ * note: already called with rcu_read_lock (preempt_disabled) from
+ * netif_receive_skb
*/
-struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
+struct sk_buff *br_handle_frame(struct sk_buff *skb)
{
+ struct net_bridge_port *p;
const unsigned char *dest = eth_hdr(skb)->h_dest;
int (*rhook)(struct sk_buff *skb);
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return skb;
+
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
goto drop;
@@ -147,6 +152,8 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
if (!skb)
return NULL;
+ p = br_port_get_rcu(skb->dev);
+
if (unlikely(is_link_local(dest))) {
/* Pause frames shouldn't be passed up by driver anyway */
if (skb->protocol == htons(ETH_P_PAUSE))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..84060bc48f11 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -117,26 +117,27 @@ void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
- atomic_set(&rt->u.dst.__refcnt, 1);
- rt->u.dst.dev = br->dev;
- rt->u.dst.path = &rt->u.dst;
- rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
- rt->u.dst.flags = DST_NOXFRM;
- rt->u.dst.ops = &fake_dst_ops;
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.dev = br->dev;
+ rt->dst.path = &rt->dst;
+ rt->dst.metrics[RTAX_MTU - 1] = 1500;
+ rt->dst.flags = DST_NOXFRM;
+ rt->dst.ops = &fake_dst_ops;
}
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
- struct net_bridge_port *port = rcu_dereference(dev->br_port);
-
- return port ? &port->br->fake_rtable : NULL;
+ if (!br_port_exists(dev))
+ return NULL;
+ return &br_port_get_rcu(dev)->br->fake_rtable;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
- struct net_bridge_port *port = rcu_dereference(dev->br_port);
+ if (!br_port_exists(dev))
+ return NULL;
- return port ? port->br->dev : NULL;
+ return br_port_get_rcu(dev)->br->dev;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -244,8 +245,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- dst_hold(&rt->u.dst);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set_noref(skb, &rt->dst);
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
@@ -396,8 +396,7 @@ bridged_dnat:
kfree_skb(skb);
return 0;
}
- dst_hold(&rt->u.dst);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set_noref(skb, &rt->dst);
}
skb->dev = nf_bridge->physindev;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fe0a79018ab2..4a6a378c84e3 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -120,10 +120,11 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
for_each_netdev(net, dev) {
/* not a bridge port */
- if (dev->br_port == NULL || idx < cb->args[0])
+ if (!br_port_exists(dev) || idx < cb->args[0])
goto skip;
- if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+ if (br_fill_ifinfo(skb, br_port_get(dev),
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWLINK,
NLM_F_MULTI) < 0)
break;
@@ -168,9 +169,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (!dev)
return -ENODEV;
- p = dev->br_port;
- if (!p)
+ if (!br_port_exists(dev))
return -EINVAL;
+ p = br_port_get(dev);
/* if kernel STP is running, don't allow changes */
if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 717e1fd6133c..404d4e14c6a7 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,14 +32,15 @@ struct notifier_block br_device_notifier = {
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
- struct net_bridge_port *p = dev->br_port;
+ struct net_bridge_port *p = br_port_get(dev);
struct net_bridge *br;
int err;
/* not a port of a bridge */
- if (p == NULL)
+ if (!br_port_exists(dev))
return NOTIFY_DONE;
+ p = br_port_get(dev);
br = p->br;
switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0f4a74bc6a9b..3f0678fd1fd0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -15,6 +15,8 @@
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
+#include <linux/netpoll.h>
+#include <linux/u64_stats_sync.h>
#include <net/route.h>
#define BR_HASH_BITS 8
@@ -143,13 +145,23 @@ struct net_bridge_port
#ifdef CONFIG_SYSFS
char sysfs_name[IFNAMSIZ];
#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
};
+#define br_port_get_rcu(dev) \
+ ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
+#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
+
struct br_cpu_netstats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long tx_packets;
- unsigned long tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
};
struct net_bridge
@@ -273,16 +285,41 @@ extern void br_dev_setup(struct net_device *dev);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern void br_netpoll_cleanup(struct net_device *dev);
-extern void br_netpoll_enable(struct net_bridge *br,
- struct net_device *dev);
-extern void br_netpoll_disable(struct net_bridge *br,
- struct net_device *dev);
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+ return br->dev->npinfo;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = p->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+
+extern int br_netpoll_enable(struct net_bridge_port *p);
+extern void br_netpoll_disable(struct net_bridge_port *p);
#else
-#define br_netpoll_cleanup(br)
-#define br_netpoll_enable(br, dev)
-#define br_netpoll_disable(br, dev)
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+ return NULL;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+ struct sk_buff *skb)
+{
+}
+static inline int br_netpoll_enable(struct net_bridge_port *p)
+{
+ return 0;
+}
+
+static inline void br_netpoll_disable(struct net_bridge_port *p)
+{
+}
#endif
/* br_fdb.c */
@@ -331,8 +368,7 @@ extern void br_features_recompute(struct net_bridge *br);
/* br_input.c */
extern int br_handle_frame_finish(struct sk_buff *skb);
-extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
- struct sk_buff *skb);
+extern struct sk_buff *br_handle_frame(struct sk_buff *skb);
/* br_ioctl.c */
extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
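The br_port_get_rcu()/br_port_get() macros above read back the pointer registered as rx_handler_data in br_add_if(); a minimal sketch of that register/lookup pattern, with illustrative names (my_port, my_handle_frame) and the handler prototype this patch uses:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>

struct my_port {
	struct net_device *dev;
	/* ... per-port state ... */
};

/* Runs from netif_receive_skb() under rcu_read_lock(); return NULL if
 * the skb was consumed, or return the skb to let the stack continue. */
static struct sk_buff *my_handle_frame(struct sk_buff *skb)
{
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	/* ... inspect skb, use port ... */
	return skb;
}

static int my_attach_port(struct net_device *dev, struct my_port *port)
{
	port->dev = dev;
	return netdev_rx_handler_register(dev, my_handle_frame, port);
}

static void my_detach_port(struct net_device *dev)
{
	netdev_rx_handler_unregister(dev);
}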
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 217bd225a42f..70aecb48fb69 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -137,12 +137,13 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
- struct net_bridge_port *p = rcu_dereference(dev->br_port);
+ struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
- if (!p)
+ if (!br_port_exists(dev))
goto err;
+ p = br_port_get_rcu(dev);
if (!pskb_may_pull(skb, 4))
goto err;
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9e19166ba453..46624bb6d9be 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -24,8 +24,9 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
return EBT_DROP;
if (par->hooknum != NF_BR_BROUTING)
+ /* rcu_read_lock()ed by nf_hook_slow */
memcpy(eth_hdr(skb)->h_dest,
- par->in->br_port->br->dev->dev_addr, ETH_ALEN);
+ br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
else
memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
skb->pkt_type = PACKET_HOST;
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ae3c7cef1484..26377e96fa1c 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -177,8 +177,9 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (in) {
strcpy(pm->physindev, in->name);
/* If in isn't a bridge, then physindev==indev */
- if (in->br_port)
- strcpy(pm->indev, in->br_port->br->dev->name);
+ if (br_port_exists(in))
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
else
strcpy(pm->indev, in->name);
} else
@@ -187,7 +188,8 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (out) {
/* If out exists, then out is a bridge port */
strcpy(pm->physoutdev, out->name);
- strcpy(pm->outdev, out->br_port->br->dev->name);
+ /* rcu_read_lock()ed by nf_hook_slow */
+ strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
} else
pm->outdev[0] = pm->physoutdev[0] = '\0';
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 59ca00e40dec..bcc102e3be4d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -140,11 +140,14 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
return 1;
if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
return 1;
- if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
- e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
+ /* rcu_read_lock()ed by nf_hook_slow */
+ if (in && br_port_exists(in) &&
+ FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
+ EBT_ILOGICALIN))
return 1;
- if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
- e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
+ if (out && br_port_exists(out) &&
+ FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
+ EBT_ILOGICALOUT))
return 1;
if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 6f36580366f0..76ae68303d3a 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -80,6 +80,11 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
l->u.utility.paramlen);
break;
+ case CAIFPROTO_DEBUG:
+ l->linktype = CFCTRL_SRV_DBG;
+ l->endpoint = s->sockaddr.u.dbg.service;
+ l->chtype = s->sockaddr.u.dbg.type;
+ break;
default:
return -EINVAL;
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e2b86f1f5a47..0b586e9d1378 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -255,7 +255,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
pref = CFPHYPREF_HIGH_BW;
break;
}
-
+ dev_hold(dev);
cfcnfg_add_phy_layer(get_caif_conf(),
phy_type,
dev,
@@ -285,6 +285,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd->layer.up->ctrlcmd(caifd->layer.up,
_CAIF_CTRLCMD_PHYIF_DOWN_IND,
caifd->layer.id);
+ might_sleep();
res = wait_event_interruptible_timeout(caifd->event,
atomic_read(&caifd->in_use) == 0,
TIMEOUT);
@@ -300,6 +301,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
"Unregistering an active CAIF device: %s\n",
__func__, dev->name);
cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ dev_put(dev);
atomic_set(&caifd->state, what);
break;
@@ -326,7 +328,8 @@ struct cfcnfg *get_caif_conf(void)
EXPORT_SYMBOL(get_caif_conf);
int caif_connect_client(struct caif_connect_request *conn_req,
- struct cflayer *client_layer)
+ struct cflayer *client_layer, int *ifindex,
+ int *headroom, int *tailroom)
{
struct cfctrl_link_param param;
int ret;
@@ -334,8 +337,9 @@ int caif_connect_client(struct caif_connect_request *conn_req,
if (ret)
return ret;
/* Hook up the adaptation layer. */
- return cfcnfg_add_adaptation_layer(get_caif_conf(),
- &param, client_layer);
+ return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
+ client_layer, ifindex,
+ headroom, tailroom);
}
EXPORT_SYMBOL(caif_connect_client);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3d0e09584fae..8ce904786116 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -28,8 +28,8 @@
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);
-#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
-#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+#define CAIF_DEF_SNDBUF (4096*10)
+#define CAIF_DEF_RCVBUF (4096*100)
/*
* CAIF state is re-using the TCP socket states.
@@ -76,6 +76,7 @@ struct caifsock {
struct caif_connect_request conn_req;
struct mutex readlock;
struct dentry *debugfs_socket_dir;
+ int headroom, tailroom, maxframe;
};
static int rx_flow_is_on(struct caifsock *cf_sk)
@@ -594,27 +595,32 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;
- buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
-
- ret = -EMSGSIZE;
- if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
- goto err;
-
timeo = sock_sndtimeo(sk, noblock);
timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
1, timeo, &ret);
+ if (ret)
+ goto err;
ret = -EPIPE;
if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
sock_flag(sk, SOCK_DEAD) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
goto err;
+ /* Error if trying to write more than maximum frame size. */
+ ret = -EMSGSIZE;
+ if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
+ goto err;
+
+ buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
+
ret = -ENOMEM;
skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
- if (!skb)
+
+ if (!skb || skb_tailroom(skb) < buffer_size)
goto err;
- skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+ skb_reserve(skb, cf_sk->headroom);
ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
@@ -645,7 +651,6 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
long timeo;
err = -EOPNOTSUPP;
-
if (unlikely(msg->msg_flags&MSG_OOB))
goto out_err;
@@ -662,8 +667,8 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = len-sent;
- if (size > CAIF_MAX_PAYLOAD_SIZE)
- size = CAIF_MAX_PAYLOAD_SIZE;
+ if (size > cf_sk->maxframe)
+ size = cf_sk->maxframe;
/* If size is more than half of sndbuf, chop up message */
if (size > ((sk->sk_sndbuf >> 1) - 64))
@@ -673,14 +678,14 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = SKB_MAX_ALLOC;
skb = sock_alloc_send_skb(sk,
- size + CAIF_NEEDED_HEADROOM
- + CAIF_NEEDED_TAILROOM,
+ size + cf_sk->headroom +
+ cf_sk->tailroom,
msg->msg_flags&MSG_DONTWAIT,
&err);
if (skb == NULL)
goto out_err;
- skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+ skb_reserve(skb, cf_sk->headroom);
/*
* If you pass two values to the sock_alloc_send_skb
* it tries to grab the large buffer with GFP_NOFS
@@ -821,17 +826,15 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
long timeo;
int err;
+ int ifindex, headroom, tailroom;
+ struct net_device *dev;
+
lock_sock(sk);
err = -EAFNOSUPPORT;
if (uaddr->sa_family != AF_CAIF)
goto out;
- err = -ESOCKTNOSUPPORT;
- if (unlikely(!(sk->sk_type == SOCK_STREAM &&
- cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
- sk->sk_type != SOCK_SEQPACKET))
- goto out;
switch (sock->state) {
case SS_UNCONNECTED:
/* Normal case, a fresh connect */
@@ -874,8 +877,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
sk_stream_kill_queues(&cf_sk->sk);
err = -EINVAL;
- if (addr_len != sizeof(struct sockaddr_caif) ||
- !uaddr)
+ if (addr_len != sizeof(struct sockaddr_caif))
goto out;
memcpy(&cf_sk->conn_req.sockaddr, uaddr,
@@ -888,12 +890,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
dbfs_atomic_inc(&cnt.num_connect_req);
cf_sk->layer.receive = caif_sktrecv_cb;
err = caif_connect_client(&cf_sk->conn_req,
- &cf_sk->layer);
+ &cf_sk->layer, &ifindex, &headroom, &tailroom);
if (err < 0) {
cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
cf_sk->sk.sk_state = CAIF_DISCONNECTED;
goto out;
}
+ dev = dev_get_by_index(sock_net(sk), ifindex);
+ cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+ cf_sk->tailroom = tailroom;
+ cf_sk->maxframe = dev->mtu - (headroom + tailroom);
+ dev_put(dev);
+ if (cf_sk->maxframe < 1) {
+ pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
+ __func__, dev->mtu);
+ err = -ENODEV;
+ goto out;
+ }
err = -EINPROGRESS;
wait_connect:
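As a worked example of the per-socket limits computed above, assume (illustratively) that caif_connect_client() reports headroom 4 and tailroom 2 for a link device with mtu 1500: cf_sk->maxframe = 1500 - (4 + 2) = 1494, so sendmsg() on a SEQPACKET socket rejects payloads above 1494 bytes with -EMSGSIZE (except for CAIFPROTO_RFM, which segments instead), while each skb is allocated with len + cf_sk->headroom + cf_sk->tailroom bytes.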
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index df43f264d9fb..1c29189b344d 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
@@ -22,6 +23,7 @@
#define PHY_NAME_LEN 20
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+#define RFM_FRAGMENT_SIZE 4030
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
@@ -41,6 +43,15 @@ struct cfcnfg_phyinfo {
/* Information about the physical device */
struct dev_info dev_info;
+
+ /* Interface index */
+ int ifindex;
+
+ /* Use Start of frame extension */
+ bool use_stx;
+
+ /* Use Start of frame checksum */
+ bool use_fcs;
};
struct cfcnfg {
@@ -248,9 +259,20 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
{
}
+int protohead[CFCTRL_SRV_MASK] = {
+ [CFCTRL_SRV_VEI] = 4,
+ [CFCTRL_SRV_DATAGRAM] = 7,
+ [CFCTRL_SRV_UTIL] = 4,
+ [CFCTRL_SRV_RFM] = 3,
+ [CFCTRL_SRV_DBG] = 3,
+};
+
int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
struct cfctrl_link_param *param,
- struct cflayer *adap_layer)
+ struct cflayer *adap_layer,
+ int *ifindex,
+ int *proto_head,
+ int *proto_tail)
{
struct cflayer *frml;
if (adap_layer == NULL) {
@@ -276,6 +298,14 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
param->phyid);
caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
param->phyid);
+
+ *ifindex = cnfg->phy_layers[param->phyid].ifindex;
+ *proto_head =
+ protohead[param->linktype]+
+ (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
+
+ *proto_tail = 2;
+
/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
cfctrl_enum_req(cnfg->ctrl, param->phyid);
return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
@@ -297,6 +327,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
struct cfcnfg *cnfg = container_obj(layer);
struct cflayer *servicel = NULL;
struct cfcnfg_phyinfo *phyinfo;
+ struct net_device *netdev;
+
if (adapt_layer == NULL) {
pr_debug("CAIF: %s(): link setup response "
"but no client exist, send linkdown back\n",
@@ -308,19 +340,15 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
caif_assert(cnfg != NULL);
caif_assert(phyid != 0);
phyinfo = &cnfg->phy_layers[phyid];
- caif_assert(phyinfo != NULL);
caif_assert(phyinfo->id == phyid);
caif_assert(phyinfo->phy_layer != NULL);
caif_assert(phyinfo->phy_layer->id == phyid);
- if (phyinfo != NULL &&
- phyinfo->phy_ref_count++ == 0 &&
- phyinfo->phy_layer != NULL &&
+ phyinfo->phy_ref_count++;
+ if (phyinfo->phy_ref_count == 1 &&
phyinfo->phy_layer->modemcmd != NULL) {
- caif_assert(phyinfo->phy_layer->id == phyid);
phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
_CAIF_MODEMCMD_PHYIF_USEFULL);
-
}
adapt_layer->id = channel_id;
@@ -332,7 +360,9 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
break;
case CFCTRL_SRV_RFM:
- servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+ netdev = phyinfo->dev_info.dev;
+ servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
+ netdev->mtu);
break;
case CFCTRL_SRV_UTIL:
servicel = cfutill_create(channel_id, &phyinfo->dev_info);
@@ -363,8 +393,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
void
cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
- void *dev, struct cflayer *phy_layer, u16 *phyid,
- enum cfcnfg_phy_preference pref,
+ struct net_device *dev, struct cflayer *phy_layer,
+ u16 *phyid, enum cfcnfg_phy_preference pref,
bool fcs, bool stx)
{
struct cflayer *frml;
@@ -418,6 +448,10 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
cnfg->phy_layers[*phyid].dev_info.dev = dev;
cnfg->phy_layers[*phyid].phy_layer = phy_layer;
cnfg->phy_layers[*phyid].phy_ref_count = 0;
+ cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
+ cnfg->phy_layers[*phyid].use_stx = stx;
+ cnfg->phy_layers[*phyid].use_fcs = fcs;
+
phy_layer->type = phy_type;
frml = cffrml_create(*phyid, fcs);
if (!frml) {
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index fcfda98a5e6d..563145fdc4c3 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -19,7 +19,7 @@
#ifdef CAIF_NO_LOOP
static int handle_loop(struct cfctrl *ctrl,
int cmd, struct cfpkt *pkt){
- return CAIF_FAILURE;
+ return -1;
}
#else
static int handle_loop(struct cfctrl *ctrl,
@@ -43,7 +43,7 @@ struct cflayer *cfctrl_create(void)
memset(&dev_info, 0, sizeof(dev_info));
dev_info.id = 0xff;
memset(this, 0, sizeof(*this));
- cfsrvl_init(&this->serv, 0, &dev_info);
+ cfsrvl_init(&this->serv, 0, &dev_info, false);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
this->serv.layer.receive = cfctrl_recv;
@@ -395,7 +395,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
- if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+ if (handle_loop(cfctrl, cmd, pkt) != 0)
cmdrsp |= CFCTRL_ERR_BIT;
}
@@ -647,6 +647,6 @@ found:
default:
break;
}
- return CAIF_SUCCESS;
+ return 0;
}
#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index ab6b6dc34cf8..676648cac8dd 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -22,7 +22,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(dbg, 0, sizeof(struct cfsrvl));
- cfsrvl_init(dbg, channel_id, dev_info);
+ cfsrvl_init(dbg, channel_id, dev_info, false);
dbg->layer.receive = cfdbgl_receive;
dbg->layer.transmit = cfdbgl_transmit;
snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 53194840ecb6..ed9d53aff280 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -17,6 +17,7 @@
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON 0x80
#define DGM_CTRL_PKT_SIZE 1
+#define DGM_MTU 1500
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
@@ -30,7 +31,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(dgm, 0, sizeof(struct cfsrvl));
- cfsrvl_init(dgm, channel_id, dev_info);
+ cfsrvl_init(dgm, channel_id, dev_info, true);
dgm->layer.receive = cfdgml_receive;
dgm->layer.transmit = cfdgml_transmit;
snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
@@ -89,6 +90,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
+ /* The STE modem cannot handle datagrams larger than 1500 bytes */
+ if (cfpkt_getlen(pkt) > DGM_MTU)
+ return -EMSGSIZE;
+
cfpkt_add_head(pkt, &zero, 4);
/* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index a6fdf899741a..01f238ff2346 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,8 +9,8 @@
#include <linux/hardirq.h>
#include <net/caif/cfpkt.h>
-#define PKT_PREFIX CAIF_NEEDED_HEADROOM
-#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_PREFIX 16
+#define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128
#define PKT_ERROR(pkt, errmsg) do { \
cfpkt_priv(pkt)->erronous = true; \
@@ -338,7 +338,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
u16 dstlen;
u16 createlen;
if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
- cfpkt_destroy(addpkt);
return dstpkt;
}
if (expectlen > addlen)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index fd27b172fb5d..4b04d25b6a3f 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,102 +7,304 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/unaligned/le_byteshift.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
-#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
-
+#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
#define RFM_SEGMENTATION_BIT 0x01
-#define RFM_PAYLOAD 0x00
-#define RFM_CMD_BIT 0x80
-#define RFM_FLOW_OFF 0x81
-#define RFM_FLOW_ON 0x80
-#define RFM_SET_PIN 0x82
-#define RFM_CTRL_PKT_SIZE 1
+#define RFM_HEAD_SIZE 7
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
-struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+struct cfrfml {
+ struct cfsrvl serv;
+ struct cfpkt *incomplete_frm;
+ int fragment_size;
+ u8 seghead[6];
+ u16 pdu_size;
+ /* Protects serialized processing of packets */
+ spinlock_t sync;
+};
+
+static void cfrfml_release(struct kref *kref)
+{
+ struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
+ struct cfrfml *rfml = container_obj(&srvl->layer);
+
+ if (rfml->incomplete_frm)
+ cfpkt_destroy(rfml->incomplete_frm);
+
+ kfree(srvl);
+}
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
+ int mtu_size)
{
- struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!rfm) {
+ int tmp;
+ struct cfrfml *this =
+ kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+
+ if (!this) {
pr_warning("CAIF: %s(): Out of memory\n", __func__);
return NULL;
}
- caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(rfm, 0, sizeof(struct cfsrvl));
- cfsrvl_init(rfm, channel_id, dev_info);
- rfm->layer.modemcmd = cfservl_modemcmd;
- rfm->layer.receive = cfrfml_receive;
- rfm->layer.transmit = cfrfml_transmit;
- snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
- return &rfm->layer;
+
+ cfsrvl_init(&this->serv, channel_id, dev_info, false);
+ this->serv.release = cfrfml_release;
+ this->serv.layer.receive = cfrfml_receive;
+ this->serv.layer.transmit = cfrfml_transmit;
+
+ /* Round down to closest multiple of 16 */
+ tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
+ tmp *= 16;
+
+ this->fragment_size = tmp;
+ spin_lock_init(&this->sync);
+ snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
+ "rfm%d", channel_id);
+
+ return &this->serv.layer;
}
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
+ struct cfpkt *pkt, int *err)
{
- return -EPROTO;
+ struct cfpkt *tmppkt;
+ *err = -EPROTO;
+ /* n-th but not last segment */
+
+ if (cfpkt_extr_head(pkt, seghead, 6) < 0)
+ return NULL;
+
+ /* Verify correct header */
+ if (memcmp(seghead, rfml->seghead, 6) != 0)
+ return NULL;
+
+ tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
+ rfml->pdu_size + RFM_HEAD_SIZE);
+
+ /* If cfpkt_append fails, the input pkts are not freed */
+ *err = -ENOMEM;
+ if (tmppkt == NULL)
+ return NULL;
+
+ *err = 0;
+ return tmppkt;
}
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
u8 tmp;
bool segmented;
- int ret;
+ int err;
+ u8 seghead[6];
+ struct cfrfml *rfml;
+ struct cfpkt *tmppkt = NULL;
+
caif_assert(layr->up != NULL);
caif_assert(layr->receive != NULL);
+ rfml = container_obj(layr);
+ spin_lock(&rfml->sync);
+
+ err = -EPROTO;
+ if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
+ goto out;
+ segmented = tmp & RFM_SEGMENTATION_BIT;
+
+ if (segmented) {
+ if (rfml->incomplete_frm == NULL) {
+ /* Initial Segment */
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ goto out;
+
+ rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
+
+ if (cfpkt_erroneous(pkt))
+ goto out;
+ rfml->incomplete_frm = pkt;
+ pkt = NULL;
+ } else {
+
+ tmppkt = rfm_append(rfml, seghead, pkt, &err);
+ if (tmppkt == NULL)
+ goto out;
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+
+ rfml->incomplete_frm = tmppkt;
+
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+ }
+ err = 0;
+ goto out;
+ }
+
+ if (rfml->incomplete_frm) {
+
+ /* Last Segment */
+ tmppkt = rfm_append(rfml, seghead, pkt, &err);
+ if (tmppkt == NULL)
+ goto out;
+
+ if (cfpkt_erroneous(tmppkt))
+ goto out;
+
+ rfml->incomplete_frm = NULL;
+ pkt = tmppkt;
+ tmppkt = NULL;
+
+ /* Verify that length is correct */
+ err = -EPROTO;
+ if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
+ goto out;
+ }
+
+ err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
+
+out:
+
+ if (err != 0) {
+ if (tmppkt)
+ cfpkt_destroy(tmppkt);
+ if (pkt)
+ cfpkt_destroy(pkt);
+ if (rfml->incomplete_frm)
+ cfpkt_destroy(rfml->incomplete_frm);
+ rfml->incomplete_frm = NULL;
+
+ pr_info("CAIF: %s(): "
+ "Connection error %d triggered on RFM link\n",
+ __func__, err);
+
+ /* Trigger connection error upon failure.*/
+ layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ rfml->serv.dev_info.id);
+ }
+ spin_unlock(&rfml->sync);
+ return err;
+}
+
+
+static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
+{
+ caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);
+
+ /* Add info for MUX-layer to route the packet out. */
+ cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
/*
- * RFM is taking care of segmentation and stripping of
- * segmentation bit.
+ * To optimize alignment, we account for the size of the CAIF
+ * header placed before the payload.
*/
- if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
- cfpkt_destroy(pkt);
- return -EPROTO;
- }
- segmented = tmp & RFM_SEGMENTATION_BIT;
- caif_assert(!segmented);
+ cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
+ cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
- ret = layr->up->receive(layr->up, pkt);
- return ret;
+ return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
- u8 tmp = 0;
- int ret;
- struct cfsrvl *service = container_obj(layr);
+ int err;
+ u8 seg;
+ u8 head[6];
+ struct cfpkt *rearpkt = NULL;
+ struct cfpkt *frontpkt = pkt;
+ struct cfrfml *rfml = container_obj(layr);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (!cfsrvl_ready(service, &ret))
- return ret;
+ if (!cfsrvl_ready(&rfml->serv, &err))
+ return err;
+
+ err = -EPROTO;
+ if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
+ goto out;
+
+ err = 0;
+ if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
+ err = cfpkt_peek_head(pkt, head, 6);
+
+ if (err < 0)
+ goto out;
+
+ while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
+
+ seg = 1;
+ err = -EPROTO;
+
+ if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+ goto out;
+ /*
+ * On OOM, cfpkt_split() returns NULL.
+ *
+ * NOTE: The segmented PDU is not correctly aligned;
+ * this has a negative performance impact.
+ */
+
+ rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
+ if (rearpkt == NULL)
+ goto out;
+
+ err = cfrfml_transmit_segment(rfml, frontpkt);
+
+ if (err != 0)
+ goto out;
+ frontpkt = rearpkt;
+ rearpkt = NULL;
+
+ err = -ENOMEM;
+ if (frontpkt == NULL)
+ goto out;
+ err = -EPROTO;
+ if (cfpkt_add_head(frontpkt, head, 6) < 0)
+ goto out;
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_err("CAIF: %s():Packet too large - size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
}
- if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
- return -EPROTO;
+
+ seg = 0;
+ err = -EPROTO;
+
+ if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+ goto out;
+
+ err = cfrfml_transmit_segment(rfml, frontpkt);
+
+ frontpkt = NULL;
+out:
+
+ if (err != 0) {
+ pr_info("CAIF: %s(): "
+ "Connection error %d triggered on RFM link\n",
+ __func__, err);
+ /* Trigger connection error upon failure.*/
+
+ layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+ rfml->serv.dev_info.id);
+
+ if (rearpkt)
+ cfpkt_destroy(rearpkt);
+
+ if (frontpkt && frontpkt != pkt) {
+
+ cfpkt_destroy(frontpkt);
+ /*
+ * The socket layer will free the original packet,
+ * but this packet may already have been sent and
+ * freed. So we have to return 0 in this case to
+ * keep the socket layer from re-freeing this packet.
+ * The shutdown indication will cause the
+ * connection to be invalidated anyway.
+ */
+ err = 0;
+ }
}
- /* Add info for MUX-layer to route the packet out. */
- cfpkt_info(pkt)->channel_id = service->layer.id;
- /*
- * To optimize alignment, we add up the size of CAIF header before
- * payload.
- */
- cfpkt_info(pkt)->hdr_len = 1;
- cfpkt_info(pkt)->dev_info = &service->dev_info;
- ret = layr->dn->transmit(layr->dn, pkt);
- if (ret < 0)
- cfpkt_extr_head(pkt, &tmp, 1);
- return ret;
+ return err;
}
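As a worked example of the fragment size chosen in cfrfml_create() above, take an illustrative link layer MTU of 1500: tmp = (1500 - RFM_HEAD_SIZE - 6) / 16 = (1500 - 7 - 6) / 16 = 92, so fragment_size = 92 * 16 = 1472; cfrfml_transmit() then keeps splitting at that 16-byte-aligned boundary until the remaining part fits within fragment_size + RFM_HEAD_SIZE.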
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 965c5baace40..a11fbd68a13d 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -14,7 +14,8 @@
#define container_obj(layr) ((struct cfserl *) layr)
#define CFSERL_STX 0x02
-#define CAIF_MINIUM_PACKET_SIZE 4
+#define SERIAL_MINIUM_PACKET_SIZE 4
+#define SERIAL_MAX_FRAMESIZE 4096
struct cfserl {
struct cflayer layer;
struct cfpkt *incomplete_frm;
@@ -119,8 +120,8 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
/*
* Frame error handling
*/
- if (expectlen < CAIF_MINIUM_PACKET_SIZE
- || expectlen > CAIF_MAX_FRAMESIZE) {
+ if (expectlen < SERIAL_MINIUM_PACKET_SIZE
+ || expectlen > SERIAL_MAX_FRAMESIZE) {
if (!layr->usestx) {
if (pkt != NULL)
cfpkt_destroy(pkt);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 6e5b7079a684..f40939a91211 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -24,8 +24,10 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfsrvl *service = container_obj(layr);
+
caif_assert(layr->up != NULL);
caif_assert(layr->up->ctrlcmd != NULL);
+
switch (ctrl) {
case CAIF_CTRLCMD_INIT_RSP:
service->open = true;
@@ -89,9 +91,14 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
{
struct cfsrvl *service = container_obj(layr);
+
caif_assert(layr != NULL);
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
+
+ if (!service->supports_flowctrl)
+ return 0;
+
switch (ctrl) {
case CAIF_MODEMCMD_FLOW_ON_REQ:
{
@@ -152,9 +159,17 @@ void cfservl_destroy(struct cflayer *layer)
kfree(layer);
}
+void cfsrvl_release(struct kref *kref)
+{
+ struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+ kfree(service);
+}
+
void cfsrvl_init(struct cfsrvl *service,
- u8 channel_id,
- struct dev_info *dev_info)
+ u8 channel_id,
+ struct dev_info *dev_info,
+ bool supports_flowctrl
+ )
{
caif_assert(offsetof(struct cfsrvl, layer) == 0);
service->open = false;
@@ -164,14 +179,11 @@ void cfsrvl_init(struct cfsrvl *service,
service->layer.ctrlcmd = cfservl_ctrlcmd;
service->layer.modemcmd = cfservl_modemcmd;
service->dev_info = *dev_info;
+ service->supports_flowctrl = supports_flowctrl;
+ service->release = cfsrvl_release;
kref_init(&service->ref);
}
-void cfsrvl_release(struct kref *kref)
-{
- struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
- kfree(service);
-}
bool cfsrvl_ready(struct cfsrvl *service, int *err)
{
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 5fd2c9ea8b42..02795aff57a4 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -31,7 +31,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(util, 0, sizeof(struct cfsrvl));
- cfsrvl_init(util, channel_id, dev_info);
+ cfsrvl_init(util, channel_id, dev_info, true);
util->layer.receive = cfutill_receive;
util->layer.transmit = cfutill_transmit;
snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
@@ -90,12 +90,6 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (!cfsrvl_ready(service, &ret))
return ret;
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_err("CAIF: %s(): packet too large size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
- }
-
cfpkt_add_head(pkt, &zero, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index e04f7d964e83..77cc09faac9a 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -30,7 +30,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(vei, 0, sizeof(struct cfsrvl));
- cfsrvl_init(vei, channel_id, dev_info);
+ cfsrvl_init(vei, channel_id, dev_info, true);
vei->layer.receive = cfvei_receive;
vei->layer.transmit = cfvei_transmit;
snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
@@ -84,11 +84,6 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
return ret;
caif_assert(layr->dn != NULL);
caif_assert(layr->dn->transmit != NULL);
- if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
- pr_warning("CAIF: %s(): Packet too large - size=%d\n",
- __func__, cfpkt_getlen(pkt));
- return -EOVERFLOW;
- }
if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index 89ad4ea239f1..ada6ee2d48f5 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -27,7 +27,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
caif_assert(offsetof(struct cfsrvl, layer) == 0);
memset(vid, 0, sizeof(struct cfsrvl));
- cfsrvl_init(vid, channel_id, dev_info);
+ cfsrvl_init(vid, channel_id, dev_info, false);
vid->layer.receive = cfvidl_receive;
vid->layer.transmit = cfvidl_transmit;
snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 610966abe2dc..4293e190ec53 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -23,7 +23,7 @@
#include <net/caif/caif_dev.h>
/* GPRS PDP connection has MTU to 1500 */
-#define SIZE_MTU 1500
+#define GPRS_PDP_MTU 1500
/* 5 sec. connect timeout */
#define CONNECT_TIMEOUT (5 * HZ)
#define CAIF_NET_DEFAULT_QUEUE_LEN 500
@@ -232,6 +232,8 @@ static int chnl_net_open(struct net_device *dev)
{
struct chnl_net *priv = NULL;
int result = -1;
+ int llifindex, headroom, tailroom, mtu;
+ struct net_device *lldev;
ASSERT_RTNL();
priv = netdev_priv(dev);
if (!priv) {
@@ -241,41 +243,88 @@ static int chnl_net_open(struct net_device *dev)
if (priv->state != CAIF_CONNECTING) {
priv->state = CAIF_CONNECTING;
- result = caif_connect_client(&priv->conn_req, &priv->chnl);
+ result = caif_connect_client(&priv->conn_req, &priv->chnl,
+ &llifindex, &headroom, &tailroom);
if (result != 0) {
- priv->state = CAIF_DISCONNECTED;
pr_debug("CAIF: %s(): err: "
"Unable to register and open device,"
" Err:%d\n",
__func__,
result);
- return result;
+ goto error;
+ }
+
+ lldev = dev_get_by_index(dev_net(dev), llifindex);
+
+ if (lldev == NULL) {
+ pr_debug("CAIF: %s(): no interface?\n", __func__);
+ result = -ENODEV;
+ goto error;
+ }
+
+ dev->needed_tailroom = tailroom + lldev->needed_tailroom;
+ dev->hard_header_len = headroom + lldev->hard_header_len +
+ lldev->needed_tailroom;
+
+ /*
+ * MTU, head-room etc. are not known before we have a
+ * CAIF link layer device available. The MTU calculation may
+ * override the initial RTNL configuration.
+ * MTU is the minimum of the current mtu, the link layer mtu
+ * minus CAIF head and tail room, and the GPRS PDP context's max MTU.
+ */
+ mtu = min_t(int, dev->mtu, lldev->mtu - (headroom + tailroom));
+ mtu = min_t(int, GPRS_PDP_MTU, mtu);
+ dev_set_mtu(dev, mtu);
+ dev_put(lldev);
+
+ if (mtu < 100) {
+ pr_warning("CAIF: %s(): "
+ "CAIF Interface MTU too small (%d)\n",
+ __func__, mtu);
+ result = -ENODEV;
+ goto error;
}
}
+ rtnl_unlock(); /* Release RTNL lock during connect wait */
+
result = wait_event_interruptible_timeout(priv->netmgmt_wq,
priv->state != CAIF_CONNECTING,
CONNECT_TIMEOUT);
+ rtnl_lock();
+
if (result == -ERESTARTSYS) {
pr_debug("CAIF: %s(): wait_event_interruptible"
" woken by a signal\n", __func__);
- return -ERESTARTSYS;
+ result = -ERESTARTSYS;
+ goto error;
}
+
if (result == 0) {
pr_debug("CAIF: %s(): connect timeout\n", __func__);
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
pr_debug("CAIF: %s(): state disconnected\n", __func__);
- return -ETIMEDOUT;
+ result = -ETIMEDOUT;
+ goto error;
}
if (priv->state != CAIF_CONNECTED) {
pr_debug("CAIF: %s(): connect failed\n", __func__);
- return -ECONNREFUSED;
+ result = -ECONNREFUSED;
+ goto error;
}
pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
return 0;
+
+error:
+ caif_disconnect_client(&priv->chnl);
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ return result;
+
}
static int chnl_net_stop(struct net_device *dev)
@@ -321,9 +370,7 @@ static void ipcaif_net_setup(struct net_device *dev)
dev->destructor = free_netdev;
dev->flags |= IFF_NOARP;
dev->flags |= IFF_POINTOPOINT;
- dev->needed_headroom = CAIF_NEEDED_HEADROOM;
- dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
- dev->mtu = SIZE_MTU;
+ dev->mtu = GPRS_PDP_MTU;
dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
priv = netdev_priv(dev);
diff --git a/net/can/raw.c b/net/can/raw.c
index da99cf153b33..ccfe633eec8e 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -436,14 +436,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
if (count > 1) {
/* filter does not fit into dfilter => alloc space */
- filter = kmalloc(optlen, GFP_KERNEL);
- if (!filter)
- return -ENOMEM;
-
- if (copy_from_user(filter, optval, optlen)) {
- kfree(filter);
- return -EFAULT;
- }
+ filter = memdup_user(optval, optlen);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
} else if (count == 1) {
if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
return -EFAULT;
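memdup_user(), used above, folds the kmalloc()/copy_from_user()/cleanup-on-fault sequence into one helper; a minimal sketch of the calling pattern (variable names are illustrative):

	void *buf = memdup_user(optval, optlen);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... use buf ... */
	kfree(buf);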
diff --git a/net/compat.c b/net/compat.c
index ec24d9edb025..63d260e81472 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -81,7 +81,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
int tot_len;
if (kern_msg->msg_namelen) {
- if (mode==VERIFY_READ) {
+ if (mode == VERIFY_READ) {
int err = move_addr_to_kernel(kern_msg->msg_name,
kern_msg->msg_namelen,
kern_address);
@@ -354,7 +354,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
static int do_set_sock_timeout(struct socket *sock, int level,
int optname, char __user *optval, unsigned int optlen)
{
- struct compat_timeval __user *up = (struct compat_timeval __user *) optval;
+ struct compat_timeval __user *up = (struct compat_timeval __user *)optval;
struct timeval ktime;
mm_segment_t old_fs;
int err;
@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime));
+ err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
set_fs(old_fs);
return err;
@@ -389,11 +389,10 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
- struct socket *sock;
+ struct socket *sock = sockfd_lookup(fd, &err);
- if ((sock = sockfd_lookup(fd, &err))!=NULL)
- {
- err = security_socket_setsockopt(sock,level,optname);
+ if (sock) {
+ err = security_socket_setsockopt(sock, level, optname);
if (err) {
sockfd_put(sock);
return err;
@@ -453,7 +452,7 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct compat_timeval __user *ctv =
- (struct compat_timeval __user*) userstamp;
+ (struct compat_timeval __user *) userstamp;
int err = -ENOENT;
struct timeval tv;
@@ -477,7 +476,7 @@ EXPORT_SYMBOL(compat_sock_get_timestamp);
int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
struct compat_timespec __user *ctv =
- (struct compat_timespec __user*) userstamp;
+ (struct compat_timespec __user *) userstamp;
int err = -ENOENT;
struct timespec ts;
@@ -502,12 +501,10 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
- struct socket *sock;
+ struct socket *sock = sockfd_lookup(fd, &err);
- if ((sock = sockfd_lookup(fd, &err))!=NULL)
- {
- err = security_socket_getsockopt(sock, level,
- optname);
+ if (sock) {
+ err = security_socket_getsockopt(sock, level, optname);
if (err) {
sockfd_put(sock);
return err;
@@ -531,7 +528,7 @@ struct compat_group_req {
__u32 gr_interface;
struct __kernel_sockaddr_storage gr_group
__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
struct compat_group_source_req {
__u32 gsr_interface;
@@ -539,7 +536,7 @@ struct compat_group_source_req {
__attribute__ ((aligned(4)));
struct __kernel_sockaddr_storage gsr_source
__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
struct compat_group_filter {
__u32 gf_interface;
@@ -549,7 +546,7 @@ struct compat_group_filter {
__u32 gf_numsrc;
struct __kernel_sockaddr_storage gf_slist[1]
__attribute__ ((aligned(4)));
-} __attribute__ ((packed));
+} __packed;
#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
sizeof(struct __kernel_sockaddr_storage))
@@ -557,7 +554,7 @@ struct compat_group_filter {
int compat_mc_setsockopt(struct sock *sock, int level, int optname,
char __user *optval, unsigned int optlen,
- int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int))
+ int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int))
{
char __user *koptval = optval;
int koptlen = optlen;
@@ -640,12 +637,11 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
}
return setsockopt(sock, level, optname, koptval, koptlen);
}
-
EXPORT_SYMBOL(compat_mc_setsockopt);
int compat_mc_getsockopt(struct sock *sock, int level, int optname,
char __user *optval, int __user *optlen,
- int (*getsockopt)(struct sock *,int,int,char __user *,int __user *))
+ int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
{
struct compat_group_filter __user *gf32 = (void *)optval;
struct group_filter __user *kgf;
@@ -681,7 +677,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
__put_user(interface, &kgf->gf_interface) ||
__put_user(fmode, &kgf->gf_fmode) ||
__put_user(numsrc, &kgf->gf_numsrc) ||
- copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group)))
+ copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
return -EFAULT;
err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
@@ -714,21 +710,22 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
copylen = numsrc * sizeof(gf32->gf_slist[0]);
if (copylen > klen)
copylen = klen;
- if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
+ if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
return -EFAULT;
}
return err;
}
-
EXPORT_SYMBOL(compat_mc_getsockopt);
/* Argument list sizes for compat_sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
-static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
- AL(4),AL(5)};
+static unsigned char nas[20] = {
+ AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
+ AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
+ AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
+ AL(4), AL(5)
+};
#undef AL
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
@@ -827,7 +824,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
compat_ptr(a[4]), compat_ptr(a[5]));
break;
case SYS_SHUTDOWN:
- ret = sys_shutdown(a0,a1);
+ ret = sys_shutdown(a0, a1);
break;
case SYS_SETSOCKOPT:
ret = compat_sys_setsockopt(a0, a1, a[2],
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..7f390b52caab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -803,35 +803,31 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
- * dev_get_by_flags - find any device with given flags
+ * dev_get_by_flags_rcu - find any device with given flags
* @net: the applicable net namespace
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags. Returns NULL if a device
- * is not found or a pointer to the device. The device returned has
- * had a reference added and the pointer is safe until the user calls
- * dev_put to indicate they have finished with it.
+ * is not found or a pointer to the device. Must be called inside
+ * rcu_read_lock(); the reference count of the returned device is not incremented.
*/
-struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
unsigned short mask)
{
struct net_device *dev, *ret;
ret = NULL;
- rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
- dev_hold(dev);
ret = dev;
break;
}
}
- rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_flags_rcu);
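
Usage sketch (not from the patch) for the renamed helper: the caller now provides the RCU read-side section and takes its own reference if the device is used outside it.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static struct net_device *grab_first_up_dev(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
	if (dev)
		dev_hold(dev);  /* the refcount is no longer taken for us */
	rcu_read_unlock();

	return dev;             /* caller must dev_put() when finished */
}
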
/**
* dev_valid_name - check if name is okay for network device
@@ -1577,7 +1573,9 @@ EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
- if (atomic_dec_and_test(&skb->users)) {
+ if (!skb->destructor)
+ dev_kfree_skb(skb);
+ else if (atomic_dec_and_test(&skb->users)) {
struct softnet_data *sd;
unsigned long flags;
@@ -1897,6 +1895,22 @@ static inline void skb_orphan_try(struct sk_buff *skb)
skb_orphan(skb);
}
+/*
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG, or if
+ * at least one of the fragments is in highmem and the device does not
+ * support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+ illegal_highdma(dev, skb))));
+}
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -1921,6 +1935,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
goto out_kfree_skb;
if (skb->next)
goto gso;
+ } else {
+ if (skb_needs_linearize(skb, dev) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not
+ * support checksumming for this protocol, complete
+ * checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+ if (!dev_can_checksum(dev, skb) &&
+ skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
}
rc = ops->ndo_start_xmit(skb, dev);
@@ -2038,14 +2068,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
+ bool contended = qdisc_is_running(q);
int rc;
+ /*
+ * Heuristic to force contended enqueues to serialize on a
+ * separate lock before trying to get the qdisc main lock.
+ * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
+ * and dequeue packets faster.
+ */
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
spin_lock(root_lock);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
- !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+ qdisc_run_begin(q)) {
/*
* This is a work-conserving queue; there are no old skbs
* waiting to be sent out; and the qdisc is not running -
@@ -2054,37 +2094,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
__qdisc_update_bstats(q, skb->len);
- if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+ if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+ if (unlikely(contended)) {
+ spin_unlock(&q->busylock);
+ contended = false;
+ }
__qdisc_run(q);
- else
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ } else
+ qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
+ if (qdisc_run_begin(q)) {
+ if (unlikely(contended)) {
+ spin_unlock(&q->busylock);
+ contended = false;
+ }
+ __qdisc_run(q);
+ }
}
spin_unlock(root_lock);
-
+ if (unlikely(contended))
+ spin_unlock(&q->busylock);
return rc;
}
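
Sketch of the busylock heuristic introduced above, reduced to a toy queue (names and types are invented): only producers that observe the queue owner running serialize on the secondary lock first, so the owner can re-take the main lock quickly.

#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_queue {
	spinlock_t main_lock;   /* analogous to the qdisc root_lock */
	spinlock_t busylock;    /* secondary "waiting room" lock */
	bool       running;     /* analogous to qdisc_is_running() */
};

static void toy_enqueue(struct toy_queue *q, void (*enqueue)(struct toy_queue *))
{
	bool contended = q->running;

	/* Contended producers first serialize among themselves. */
	if (contended)
		spin_lock(&q->busylock);

	spin_lock(&q->main_lock);
	enqueue(q);
	spin_unlock(&q->main_lock);

	if (contended)
		spin_unlock(&q->busylock);
}
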
-/*
- * Returns true if either:
- * 1. skb has frag_list and the device doesn't support FRAGLIST, or
- * 2. skb is fragmented and the device does not support SG, or if
- * at least one of fragments is in highmem and device does not
- * support DMA from it.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
- struct net_device *dev)
-{
- return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
- illegal_highdma(dev, skb)));
-}
-
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -2117,25 +2153,6 @@ int dev_queue_xmit(struct sk_buff *skb)
struct Qdisc *q;
int rc = -ENOMEM;
- /* GSO will handle the following emulations directly. */
- if (netif_needs_gso(dev, skb))
- goto gso;
-
- /* Convert a paged skb to linear, if required */
- if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
- goto out_kfree_skb;
-
- /* If packet is not checksummed and device does not support
- * checksumming for this protocol, complete checksumming here.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
- if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
- goto out_kfree_skb;
- }
-
-gso:
/* Disable soft irqs for various locks below. Also
* stops preemption for RCU.
*/
@@ -2194,7 +2211,6 @@ gso:
rc = -ENETDOWN;
rcu_read_unlock_bh();
-out_kfree_skb:
kfree_skb(skb);
return rc;
out:
@@ -2579,70 +2595,14 @@ static inline int deliver_skb(struct sk_buff *skb,
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
-#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
+ (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
-/*
- * If bridge module is loaded call bridging hook.
- * returns NULL if packet was consumed.
- */
-struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
- struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(br_handle_frame_hook);
-
-static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
- struct packet_type **pt_prev, int *ret,
- struct net_device *orig_dev)
-{
- struct net_bridge_port *port;
-
- if (skb->pkt_type == PACKET_LOOPBACK ||
- (port = rcu_dereference(skb->dev->br_port)) == NULL)
- return skb;
-
- if (*pt_prev) {
- *ret = deliver_skb(skb, *pt_prev, orig_dev);
- *pt_prev = NULL;
- }
-
- return br_handle_frame_hook(port, skb);
-}
-#else
-#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
-#endif
-
-#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
- struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
-
-static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
- struct packet_type **pt_prev,
- int *ret,
- struct net_device *orig_dev)
-{
- struct macvlan_port *port;
-
- port = rcu_dereference(skb->dev->macvlan_port);
- if (!port)
- return skb;
-
- if (*pt_prev) {
- *ret = deliver_skb(skb, *pt_prev, orig_dev);
- *pt_prev = NULL;
- }
- return macvlan_handle_frame_hook(port, skb);
-}
-#else
-#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
-#endif
-
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
* when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -2693,9 +2653,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
- } else {
- /* Huh? Why does turning on AF_PACKET affect this? */
- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
}
switch (ing_filter(skb)) {
@@ -2738,6 +2695,51 @@ void netif_nit_deliver(struct sk_buff *skb)
rcu_read_unlock();
}
+/**
+ * netdev_rx_handler_register - register receive handler
+ * @dev: device to register a handler for
+ * @rx_handler: receive handler to register
+ * @rx_handler_data: data pointer that is used by rx handler
+ *
+ * Register a receive handler for a device. This handler will then be
+ * called from __netif_receive_skb. A negative errno code is returned
+ * on failure.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data)
+{
+ ASSERT_RTNL();
+
+ if (dev->rx_handler)
+ return -EBUSY;
+
+ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
+
+/**
+ * netdev_rx_handler_unregister - unregister receive handler
+ * @dev: device to unregister a handler from
+ *
+ * Unregister a receive handler from a device.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void netdev_rx_handler_unregister(struct net_device *dev)
+{
+
+ ASSERT_RTNL();
+ rcu_assign_pointer(dev->rx_handler, NULL);
+ rcu_assign_pointer(dev->rx_handler_data, NULL);
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+
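
Usage sketch for the new hooks (the handler and the attach/detach wrappers are made up; at this point in the series the handler takes only the skb):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static struct sk_buff *my_rx_handler(struct sk_buff *skb)
{
	/* Return NULL if the frame was consumed, or the skb to let
	 * __netif_receive_skb() continue normal delivery. */
	return skb;
}

static int my_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_rx_handler, priv);
	rtnl_unlock();
	return err;             /* -EBUSY if a handler is already set */
}

static void my_detach(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
	synchronize_net();      /* wait out in-flight receivers before freeing priv */
}
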
static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
struct net_device *master)
{
@@ -2759,7 +2761,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
- if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ if ((master->priv_flags & IFF_MASTER_ALB) &&
+ (master->priv_flags & IFF_BRIDGE_PORT)) {
/* Do address unmangle. The local destination address
* will be always the one master has. Provides the right
* functionality in a bridge.
@@ -2790,6 +2793,7 @@ EXPORT_SYMBOL(__skb_bond_should_drop);
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
+ rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
struct net_device *master;
struct net_device *null_or_orig;
@@ -2831,8 +2835,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
skb->dev = master;
}
- __get_cpu_var(softnet_data).processed++;
-
+ __this_cpu_inc(softnet_data.processed);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
@@ -2864,12 +2867,17 @@ static int __netif_receive_skb(struct sk_buff *skb)
ncls:
#endif
- skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
- if (!skb)
- goto out;
- skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
- if (!skb)
- goto out;
+ /* Handle special case of bridge or macvlan */
+ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+ skb = rx_handler(skb);
+ if (!skb)
+ goto out;
+ }
/*
* Make sure frames received on VLAN interfaces stacked on
@@ -3694,10 +3702,10 @@ void dev_seq_stop(struct seq_file *seq, void *v)
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
- const struct net_device_stats *stats = dev_get_stats(dev);
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev);
- seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
- "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
@@ -5274,18 +5282,21 @@ EXPORT_SYMBOL(dev_txq_stats_fold);
* @dev: device to get statistics from
*
* Get network statistics from device. The device driver may provide
- * its own method by setting dev->netdev_ops->get_stats; otherwise
- * the internal statistics structure is used.
+ * its own method by setting dev->netdev_ops->get_stats64 or
+ * dev->netdev_ops->get_stats; otherwise the internal statistics
+ * structure is used.
*/
-const struct net_device_stats *dev_get_stats(struct net_device *dev)
+const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_get_stats64)
+ return ops->ndo_get_stats64(dev);
if (ops->ndo_get_stats)
- return ops->ndo_get_stats(dev);
+ return (struct rtnl_link_stats64 *)ops->ndo_get_stats(dev);
dev_txq_stats_fold(dev, &dev->stats);
- return &dev->stats;
+ return &dev->stats64;
}
EXPORT_SYMBOL(dev_get_stats);
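
Sketch (illustrative) of a reader after this change: statistics now come back as struct rtnl_link_stats64, so 64-bit counters survive even on 32-bit hosts.

#include <linux/netdevice.h>

static u64 example_tx_bytes(struct net_device *dev)
{
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev);

	return stats->tx_bytes; /* full 64-bit counter */
}
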
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 785e5276a300..9fbe7f7429b0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -263,6 +263,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
*
* Removes the rate estimator specified by &bstats and &rate_est.
*
+ * Note: the caller should respect an RCU grace period before freeing stats_lock.
*/
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 99e7052d7323..ea3bb4c3b87d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,6 +29,7 @@ static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
+static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
@@ -324,14 +325,13 @@ static ssize_t netstat_show(const struct device *d,
struct net_device *dev = to_net_dev(d);
ssize_t ret = -EINVAL;
- WARN_ON(offset > sizeof(struct net_device_stats) ||
- offset % sizeof(unsigned long) != 0);
+ WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
+ offset % sizeof(u64) != 0);
read_lock(&dev_base_lock);
if (dev_isalive(dev)) {
- const struct net_device_stats *stats = dev_get_stats(dev);
- ret = sprintf(buf, fmt_ulong,
- *(unsigned long *)(((u8 *) stats) + offset));
+ const struct rtnl_link_stats64 *stats = dev_get_stats(dev);
+ ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
}
read_unlock(&dev_base_lock);
return ret;
@@ -343,7 +343,7 @@ static ssize_t show_##name(struct device *d, \
struct device_attribute *attr, char *buf) \
{ \
return netstat_show(d, attr, buf, \
- offsetof(struct net_device_stats, name)); \
+ offsetof(struct rtnl_link_stats64, name)); \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94825b109551..560297ee55b4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,7 +49,6 @@ static atomic_t trapped;
(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
sizeof(struct iphdr) + sizeof(struct ethhdr))
-static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
static unsigned int carrier_timeout = 4;
@@ -197,7 +196,6 @@ void netpoll_poll_dev(struct net_device *dev)
service_arp_queue(dev->npinfo);
- zap_completion_queue();
}
void netpoll_poll(struct netpoll *np)
@@ -221,40 +219,11 @@ static void refill_skbs(void)
spin_unlock_irqrestore(&skb_pool.lock, flags);
}
-static void zap_completion_queue(void)
-{
- unsigned long flags;
- struct softnet_data *sd = &get_cpu_var(softnet_data);
-
- if (sd->completion_queue) {
- struct sk_buff *clist;
-
- local_irq_save(flags);
- clist = sd->completion_queue;
- sd->completion_queue = NULL;
- local_irq_restore(flags);
-
- while (clist != NULL) {
- struct sk_buff *skb = clist;
- clist = clist->next;
- if (skb->destructor) {
- atomic_inc(&skb->users);
- dev_kfree_skb_any(skb); /* put this one back */
- } else {
- __kfree_skb(skb);
- }
- }
- }
-
- put_cpu_var(softnet_data);
-}
-
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
int count = 0;
struct sk_buff *skb;
- zap_completion_queue();
refill_skbs();
repeat:
@@ -292,6 +261,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
unsigned long tries;
struct net_device *dev = np->dev;
const struct net_device_ops *ops = dev->netdev_ops;
+ /* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo = np->dev->npinfo;
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -723,29 +693,27 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
return -1;
}
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
{
- struct net_device *ndev = NULL;
- struct in_device *in_dev;
+ struct net_device *ndev = np->dev;
struct netpoll_info *npinfo;
- struct netpoll *npe, *tmp;
+ const struct net_device_ops *ops;
unsigned long flags;
int err;
- if (np->dev_name)
- ndev = dev_get_by_name(&init_net, np->dev_name);
- if (!ndev) {
- printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
+ if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !ndev->netdev_ops->ndo_poll_controller) {
+ printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
np->name, np->dev_name);
- return -ENODEV;
+ err = -ENOTSUPP;
+ goto out;
}
- np->dev = ndev;
if (!ndev->npinfo) {
npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
if (!npinfo) {
err = -ENOMEM;
- goto put;
+ goto out;
}
npinfo->rx_flags = 0;
@@ -757,6 +725,13 @@ int netpoll_setup(struct netpoll *np)
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
+
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_setup) {
+ err = ops->ndo_netpoll_setup(ndev, npinfo);
+ if (err)
+ goto free_npinfo;
+ }
} else {
npinfo = ndev->npinfo;
atomic_inc(&npinfo->refcnt);
@@ -764,12 +739,38 @@ int netpoll_setup(struct netpoll *np)
npinfo->netpoll = np;
- if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !ndev->netdev_ops->ndo_poll_controller) {
- printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+ if (np->rx_hook) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+ list_add_tail(&np->rx, &npinfo->rx_np);
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
+
+ /* last thing to do is link it to the net device structure */
+ rcu_assign_pointer(ndev->npinfo, npinfo);
+
+ return 0;
+
+free_npinfo:
+ kfree(npinfo);
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+ struct net_device *ndev = NULL;
+ struct in_device *in_dev;
+ int err;
+
+ if (np->dev_name)
+ ndev = dev_get_by_name(&init_net, np->dev_name);
+ if (!ndev) {
+ printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
np->name, np->dev_name);
- err = -ENOTSUPP;
- goto release;
+ return -ENODEV;
}
if (!netif_running(ndev)) {
@@ -785,7 +786,7 @@ int netpoll_setup(struct netpoll *np)
if (err) {
printk(KERN_ERR "%s: failed to open %s\n",
np->name, ndev->name);
- goto release;
+ goto put;
}
atleast = jiffies + HZ/10;
@@ -822,7 +823,7 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_ERR "%s: no IP address for %s, aborting\n",
np->name, np->dev_name);
err = -EDESTADDRREQ;
- goto release;
+ goto put;
}
np->local_ip = in_dev->ifa_list->ifa_local;
@@ -830,34 +831,20 @@ int netpoll_setup(struct netpoll *np)
printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
}
- if (np->rx_hook) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- npinfo->rx_flags |= NETPOLL_RX_ENABLED;
- list_add_tail(&np->rx, &npinfo->rx_np);
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
- }
+ np->dev = ndev;
/* fill up the skb queue */
refill_skbs();
- /* last thing to do is link it to the net device structure */
- ndev->npinfo = npinfo;
+ rtnl_lock();
+ err = __netpoll_setup(np);
+ rtnl_unlock();
- /* avoid racing with NAPI reading npinfo */
- synchronize_rcu();
+ if (err)
+ goto put;
return 0;
- release:
- if (!ndev->npinfo) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
- npe->dev = NULL;
- }
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
- kfree(npinfo);
- }
put:
dev_put(ndev);
return err;
@@ -870,42 +857,56 @@ static int __init netpoll_init(void)
}
core_initcall(netpoll_init);
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
unsigned long flags;
- if (np->dev) {
- npinfo = np->dev->npinfo;
- if (npinfo) {
- if (!list_empty(&npinfo->rx_np)) {
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- list_del(&np->rx);
- if (list_empty(&npinfo->rx_np))
- npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
- }
+ npinfo = np->dev->npinfo;
+ if (!npinfo)
+ return;
- if (atomic_dec_and_test(&npinfo->refcnt)) {
- const struct net_device_ops *ops;
- skb_queue_purge(&npinfo->arp_tx);
- skb_queue_purge(&npinfo->txq);
- cancel_rearming_delayed_work(&npinfo->tx_work);
-
- /* clean after last, unfinished work */
- __skb_queue_purge(&npinfo->txq);
- kfree(npinfo);
- ops = np->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(np->dev);
- else
- np->dev->npinfo = NULL;
- }
- }
+ if (!list_empty(&npinfo->rx_np)) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_del(&np->rx);
+ if (list_empty(&npinfo->rx_np))
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ }
- dev_put(np->dev);
+ if (atomic_dec_and_test(&npinfo->refcnt)) {
+ const struct net_device_ops *ops;
+
+ ops = np->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(np->dev);
+
+ rcu_assign_pointer(np->dev->npinfo, NULL);
+
+ /* avoid racing with NAPI reading npinfo */
+ synchronize_rcu_bh();
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+ cancel_rearming_delayed_work(&npinfo->tx_work);
+
+ /* clean after last, unfinished work */
+ __skb_queue_purge(&npinfo->txq);
+ kfree(npinfo);
}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+void netpoll_cleanup(struct netpoll *np)
+{
+ if (!np->dev)
+ return;
+
+ rtnl_lock();
+ __netpoll_cleanup(np);
+ rtnl_unlock();
+ dev_put(np->dev);
np->dev = NULL;
}
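
Sketch (wrapper names are made up) of how the split above is meant to be used: a caller that already holds a device reference can drive the double-underscore variants directly under RTNL, while netpoll_setup()/netpoll_cleanup() remain the lookup-and-lock convenience wrappers.

#include <linux/netpoll.h>
#include <linux/rtnetlink.h>

static int attach_netpoll(struct netpoll *np, struct net_device *dev)
{
	int err;

	np->dev = dev;                  /* caller already holds a reference */
	rtnl_lock();
	err = __netpoll_setup(np);      /* no name lookup, no dev_open() */
	rtnl_unlock();
	return err;
}

static void detach_netpoll(struct netpoll *np)
{
	rtnl_lock();
	__netpoll_cleanup(np);          /* drops npinfo, keeps the dev reference */
	rtnl_unlock();
}
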
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1dacd7ba8dbb..6428653e9498 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -169,7 +169,7 @@
#include <asm/dma.h>
#include <asm/div64.h> /* do_div */
-#define VERSION "2.73"
+#define VERSION "2.74"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -980,6 +980,40 @@ static ssize_t pktgen_if_write(struct file *file,
(unsigned long long) pkt_dev->delay);
return count;
}
+ if (!strcmp(name, "rate")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+ if (!value)
+ return len;
+ pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
+ if (debug)
+ printk(KERN_INFO
+ "pktgen: Delay set at: %llu ns\n",
+ pkt_dev->delay);
+
+ sprintf(pg_result, "OK: rate=%lu", value);
+ return count;
+ }
+ if (!strcmp(name, "ratep")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+ if (!value)
+ return len;
+ pkt_dev->delay = NSEC_PER_SEC/value;
+ if (debug)
+ printk(KERN_INFO
+ "pktgen: Delay set at: %llu ns\n",
+ pkt_dev->delay);
+
+ sprintf(pg_result, "OK: rate=%lu", value);
+ return count;
+ }
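
Worked example (standalone, numbers made up) for the new "ratep" option above: a packets-per-second value is simply inverted into an inter-packet delay in nanoseconds.

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long value = 1000;     /* "ratep 1000" => 1000 packets/s */
	unsigned long long delay_ns = NSEC_PER_SEC / value;

	printf("delay = %llu ns between packets\n", delay_ns);  /* 1000000 */
	return 0;
}
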
if (!strcmp(name, "udp_src_min")) {
len = num_arg(&user_buffer[i], 10, &value);
if (len < 0)
@@ -2142,15 +2176,15 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_set_expires(&t.timer, spin_until);
- remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
+ remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
if (remaining <= 0) {
pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
return;
}
start_time = ktime_now();
- if (remaining < 100)
- udelay(remaining); /* really small just spin */
+ if (remaining < 100000)
+ ndelay(remaining); /* really small just spin */
else {
/* see do_nanosleep */
hrtimer_init_sleeper(&t, current);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a2af24e9e3d..e645778e9b7e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -579,7 +579,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
}
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
- const struct net_device_stats *b)
+ const struct rtnl_link_stats64 *b)
{
a->rx_packets = b->rx_packets;
a->tx_packets = b->tx_packets;
@@ -610,7 +610,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
a->tx_compressed = b->tx_compressed;
}
-static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
+static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
struct rtnl_link_stats64 a;
@@ -791,7 +791,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
- const struct net_device_stats *stats;
+ const struct rtnl_link_stats64 *stats;
struct nlattr *attr;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
diff --git a/net/core/scm.c b/net/core/scm.c
index b88f6f9d0b97..681c976307b5 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -170,6 +170,30 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
err = scm_check_creds(&p->creds);
if (err)
goto error;
+
+ if (pid_vnr(p->pid) != p->creds.pid) {
+ struct pid *pid;
+ err = -ESRCH;
+ pid = find_get_pid(p->creds.pid);
+ if (!pid)
+ goto error;
+ put_pid(p->pid);
+ p->pid = pid;
+ }
+
+ if ((p->cred->euid != p->creds.uid) ||
+ (p->cred->egid != p->creds.gid)) {
+ struct cred *cred;
+ err = -ENOMEM;
+ cred = prepare_creds();
+ if (!cred)
+ goto error;
+
+ cred->uid = cred->euid = p->creds.uid;
+ cred->gid = cred->egid = p->creds.gid;
+ put_cred(p->cred);
+ p->cred = cred;
+ }
break;
default:
goto error;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2cf7f9f7e775..fef2434b7c8c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -110,6 +110,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
+#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -156,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
- "sk_lock-AF_IEEE802154",
+ "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -172,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-27" , "slock-28" , "slock-AF_CAN" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
- "slock-AF_IEEE802154",
+ "slock-AF_IEEE802154", "slock-AF_CAIF" ,
"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -188,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
- "clock-AF_IEEE802154",
+ "clock-AF_IEEE802154", "clock-AF_CAIF" ,
"clock-AF_MAX"
};
@@ -749,6 +750,20 @@ set_rcvbuf:
EXPORT_SYMBOL(sock_setsockopt);
+void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ struct ucred *ucred)
+{
+ ucred->pid = pid_vnr(pid);
+ ucred->uid = ucred->gid = -1;
+ if (cred) {
+ struct user_namespace *current_ns = current_user_ns();
+
+ ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
+ ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
+ }
+}
+EXPORT_SYMBOL_GPL(cred_to_ucred);
+
int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -901,11 +916,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_PEERCRED:
- if (len > sizeof(sk->sk_peercred))
- len = sizeof(sk->sk_peercred);
- if (copy_to_user(optval, &sk->sk_peercred, len))
+ {
+ struct ucred peercred;
+ if (len > sizeof(peercred))
+ len = sizeof(peercred);
+ cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+ if (copy_to_user(optval, &peercred, len))
return -EFAULT;
goto lenout;
+ }
case SO_PEERNAME:
{
@@ -1119,6 +1138,9 @@ static void __sk_free(struct sock *sk)
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_omem_alloc));
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ put_pid(sk->sk_peer_pid);
put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -1954,9 +1976,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- sk->sk_peercred.pid = 0;
- sk->sk_peercred.uid = -1;
- sk->sk_peercred.gid = -1;
+ sk->sk_peer_pid = NULL;
+ sk->sk_peer_cred = NULL;
sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
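
Userspace sketch (illustrative) of the consumer side: SO_PEERCRED still returns a struct ucred, now assembled on demand from sk_peer_pid/sk_peer_cred by the cred_to_ucred() helper added above.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

static int print_peer_creds(int fd)
{
	struct ucred peer;
	socklen_t len = sizeof(peer);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0)
		return -1;

	printf("pid=%d uid=%u gid=%u\n", peer.pid, peer.uid, peer.gid);
	return 0;
}
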
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9b11ef8694c..d4a166f0f391 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -105,7 +105,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto failure;
/* OK, now commit destination to socket. */
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
@@ -475,7 +475,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
return NULL;
}
- return &rt->u.dst;
+ return &rt->dst;
}
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 091698899594..6e3f32575df7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -248,7 +248,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ipv6_txoptions *opt = NULL;
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
struct flowi fl;
int err = -1;
struct dst_entry *dst;
@@ -265,13 +265,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
opt = np->opt;
- if (opt != NULL && opt->srcrt != NULL) {
- const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
-
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, opt, &final);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
@@ -545,19 +539,13 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
goto out_overflow;
if (dst == NULL) {
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
- if (opt != NULL && opt->srcrt != NULL) {
- const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
-
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, opt, &final);
ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
fl.oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
@@ -885,7 +873,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
- struct in6_addr *saddr = NULL, *final_p = NULL, final;
+ struct in6_addr *saddr = NULL, *final_p, final;
struct flowi fl;
struct dst_entry *dst;
int addr_type;
@@ -988,13 +976,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
- if (np->opt != NULL && np->opt->srcrt != NULL) {
- const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
-
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, np->opt, &final);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
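
The repeated srcrt handling removed in the dccp hunks above is what the new fl6_update_dst() helper centralizes; a simplified restatement of that logic (a sketch, not the actual helper):

#include <net/ipv6.h>
#include <net/flow.h>
#include <linux/ipv6.h>

static struct in6_addr *fl6_update_dst_sketch(struct flowi *fl,
					      const struct ipv6_txoptions *opt,
					      struct in6_addr *final)
{
	if (!opt || !opt->srcrt)
		return NULL;

	/* Remember the real destination, then route via the first hop. */
	ipv6_addr_copy(final, &fl->fl6_dst);
	ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
	return final;
}
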
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b03ecf6b2bb0..f79bcef5088f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -473,14 +473,9 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
return -EINVAL;
- val = kmalloc(optlen, GFP_KERNEL);
- if (val == NULL)
- return -ENOMEM;
-
- if (copy_from_user(val, optval, optlen)) {
- kfree(val);
- return -EFAULT;
- }
+ val = memdup_user(optval, optlen);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
lock_sock(sk);
if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 812e6dff6067..6585ea6d1182 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -146,13 +146,13 @@ static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
static inline void dnrt_free(struct dn_route *rt)
{
- call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+ call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static inline void dnrt_drop(struct dn_route *rt)
{
- dst_release(&rt->u.dst);
- call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+ dst_release(&rt->dst);
+ call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static void dn_dst_check_expire(unsigned long dummy)
@@ -167,13 +167,13 @@ static void dn_dst_check_expire(unsigned long dummy)
spin_lock(&dn_rt_hash_table[i].lock);
while((rt=*rtp) != NULL) {
- if (atomic_read(&rt->u.dst.__refcnt) ||
- (now - rt->u.dst.lastuse) < expire) {
- rtp = &rt->u.dst.dn_next;
+ if (atomic_read(&rt->dst.__refcnt) ||
+ (now - rt->dst.lastuse) < expire) {
+ rtp = &rt->dst.dn_next;
continue;
}
- *rtp = rt->u.dst.dn_next;
- rt->u.dst.dn_next = NULL;
+ *rtp = rt->dst.dn_next;
+ rt->dst.dn_next = NULL;
dnrt_free(rt);
}
spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,13 +198,13 @@ static int dn_dst_gc(struct dst_ops *ops)
rtp = &dn_rt_hash_table[i].chain;
while((rt=*rtp) != NULL) {
- if (atomic_read(&rt->u.dst.__refcnt) ||
- (now - rt->u.dst.lastuse) < expire) {
- rtp = &rt->u.dst.dn_next;
+ if (atomic_read(&rt->dst.__refcnt) ||
+ (now - rt->dst.lastuse) < expire) {
+ rtp = &rt->dst.dn_next;
continue;
}
- *rtp = rt->u.dst.dn_next;
- rt->u.dst.dn_next = NULL;
+ *rtp = rt->dst.dn_next;
+ rt->dst.dn_next = NULL;
dnrt_drop(rt);
break;
}
@@ -287,25 +287,25 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
while((rth = *rthp) != NULL) {
if (compare_keys(&rth->fl, &rt->fl)) {
/* Put it first */
- *rthp = rth->u.dst.dn_next;
- rcu_assign_pointer(rth->u.dst.dn_next,
+ *rthp = rth->dst.dn_next;
+ rcu_assign_pointer(rth->dst.dn_next,
dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
- dst_use(&rth->u.dst, now);
+ dst_use(&rth->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
dnrt_drop(rt);
*rp = rth;
return 0;
}
- rthp = &rth->u.dst.dn_next;
+ rthp = &rth->dst.dn_next;
}
- rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
+ rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
- dst_use(&rt->u.dst, now);
+ dst_use(&rt->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
*rp = rt;
return 0;
@@ -323,8 +323,8 @@ static void dn_run_flush(unsigned long dummy)
goto nothing_to_declare;
for(; rt; rt=next) {
- next = rt->u.dst.dn_next;
- rt->u.dst.dn_next = NULL;
+ next = rt->dst.dn_next;
+ rt->dst.dn_next = NULL;
dst_free((struct dst_entry *)rt);
}
@@ -743,7 +743,7 @@ static int dn_forward(struct sk_buff *skb)
/* Ensure that we have enough space for headers */
rt = (struct dn_route *)skb_dst(skb);
header_len = dn_db->use_long ? 21 : 6;
- if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
+ if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
goto drop;
/*
@@ -752,7 +752,7 @@ static int dn_forward(struct sk_buff *skb)
if (++cb->hops > 30)
goto drop;
- skb->dev = rt->u.dst.dev;
+ skb->dev = rt->dst.dev;
/*
* If packet goes out same interface it came in on, then set
@@ -792,7 +792,7 @@ static int dn_rt_bug(struct sk_buff *skb)
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
- struct net_device *dev = rt->u.dst.dev;
+ struct net_device *dev = rt->dst.dev;
struct neighbour *n;
unsigned mss;
@@ -800,25 +800,25 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
if (DN_FIB_RES_GW(*res) &&
DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = DN_FIB_RES_GW(*res);
- memcpy(rt->u.dst.metrics, fi->fib_metrics,
- sizeof(rt->u.dst.metrics));
+ memcpy(rt->dst.metrics, fi->fib_metrics,
+ sizeof(rt->dst.metrics));
}
rt->rt_type = res->type;
- if (dev != NULL && rt->u.dst.neighbour == NULL) {
+ if (dev != NULL && rt->dst.neighbour == NULL) {
n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
if (IS_ERR(n))
return PTR_ERR(n);
- rt->u.dst.neighbour = n;
+ rt->dst.neighbour = n;
}
- if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 ||
- dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu)
- rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
- mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
- if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 ||
- dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss)
- rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
+ if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
+ dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
+ rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+ mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
+ if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
+ dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
+ rt->dst.metrics[RTAX_ADVMSS-1] = mss;
return 0;
}
@@ -1096,8 +1096,8 @@ make_route:
if (rt == NULL)
goto e_nobufs;
- atomic_set(&rt->u.dst.__refcnt, 1);
- rt->u.dst.flags = DST_HOST;
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.flags = DST_HOST;
rt->fl.fld_src = oldflp->fld_src;
rt->fl.fld_dst = oldflp->fld_dst;
@@ -1113,17 +1113,17 @@ make_route:
rt->rt_dst_map = fl.fld_dst;
rt->rt_src_map = fl.fld_src;
- rt->u.dst.dev = dev_out;
+ rt->dst.dev = dev_out;
dev_hold(dev_out);
- rt->u.dst.neighbour = neigh;
+ rt->dst.neighbour = neigh;
neigh = NULL;
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.output = dn_output;
- rt->u.dst.input = dn_rt_bug;
+ rt->dst.lastuse = jiffies;
+ rt->dst.output = dn_output;
+ rt->dst.input = dn_rt_bug;
rt->rt_flags = flags;
if (flags & RTCF_LOCAL)
- rt->u.dst.input = dn_nsp_rx;
+ rt->dst.input = dn_nsp_rx;
err = dn_rt_set_next_hop(rt, &res);
if (err)
@@ -1152,7 +1152,7 @@ e_nobufs:
err = -ENOBUFS;
goto done;
e_neighbour:
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
goto e_nobufs;
}
@@ -1168,15 +1168,15 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
if (!(flags & MSG_TRYHARD)) {
rcu_read_lock_bh();
for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
- rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
+ rt = rcu_dereference_bh(rt->dst.dn_next)) {
if ((flp->fld_dst == rt->fl.fld_dst) &&
(flp->fld_src == rt->fl.fld_src) &&
(flp->mark == rt->fl.mark) &&
(rt->fl.iif == 0) &&
(rt->fl.oif == flp->oif)) {
- dst_use(&rt->u.dst, jiffies);
+ dst_use(&rt->dst, jiffies);
rcu_read_unlock_bh();
- *pprt = &rt->u.dst;
+ *pprt = &rt->dst;
return 0;
}
}
@@ -1375,29 +1375,29 @@ make_route:
rt->fl.iif = in_dev->ifindex;
rt->fl.mark = fl.mark;
- rt->u.dst.flags = DST_HOST;
- rt->u.dst.neighbour = neigh;
- rt->u.dst.dev = out_dev;
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.output = dn_rt_bug;
+ rt->dst.flags = DST_HOST;
+ rt->dst.neighbour = neigh;
+ rt->dst.dev = out_dev;
+ rt->dst.lastuse = jiffies;
+ rt->dst.output = dn_rt_bug;
switch(res.type) {
case RTN_UNICAST:
- rt->u.dst.input = dn_forward;
+ rt->dst.input = dn_forward;
break;
case RTN_LOCAL:
- rt->u.dst.output = dn_output;
- rt->u.dst.input = dn_nsp_rx;
- rt->u.dst.dev = in_dev;
+ rt->dst.output = dn_output;
+ rt->dst.input = dn_nsp_rx;
+ rt->dst.dev = in_dev;
flags |= RTCF_LOCAL;
break;
default:
case RTN_UNREACHABLE:
case RTN_BLACKHOLE:
- rt->u.dst.input = dst_discard;
+ rt->dst.input = dst_discard;
}
rt->rt_flags = flags;
- if (rt->u.dst.dev)
- dev_hold(rt->u.dst.dev);
+ if (rt->dst.dev)
+ dev_hold(rt->dst.dev);
err = dn_rt_set_next_hop(rt, &res);
if (err)
@@ -1405,7 +1405,7 @@ make_route:
hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
dn_insert_route(rt, hash, &rt);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
done:
if (neigh)
@@ -1427,7 +1427,7 @@ e_nobufs:
goto done;
e_neighbour:
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
goto done;
}
@@ -1442,13 +1442,13 @@ static int dn_route_input(struct sk_buff *skb)
rcu_read_lock();
for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
- rt = rcu_dereference(rt->u.dst.dn_next)) {
+ rt = rcu_dereference(rt->dst.dn_next)) {
if ((rt->fl.fld_src == cb->src) &&
(rt->fl.fld_dst == cb->dst) &&
(rt->fl.oif == 0) &&
(rt->fl.mark == skb->mark) &&
(rt->fl.iif == cb->iif)) {
- dst_use(&rt->u.dst, jiffies);
+ dst_use(&rt->dst, jiffies);
rcu_read_unlock();
skb_dst_set(skb, (struct dst_entry *)rt);
return 0;
@@ -1487,8 +1487,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
r->rtm_src_len = 16;
RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
}
- if (rt->u.dst.dev)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
+ if (rt->dst.dev)
+ RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
/*
* Note to self - change this if input routes reverse direction when
* they deal only with inputs and not with replies like they do
@@ -1497,11 +1497,11 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
if (rt->rt_daddr != rt->rt_gateway)
RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
- if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+ if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
goto rtattr_failure;
- expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
- if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
- rt->u.dst.error) < 0)
+ expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
+ rt->dst.error) < 0)
goto rtattr_failure;
if (rt->fl.iif)
RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
@@ -1568,8 +1568,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
local_bh_enable();
memset(cb, 0, sizeof(struct dn_skb_cb));
rt = (struct dn_route *)skb_dst(skb);
- if (!err && -rt->u.dst.error)
- err = rt->u.dst.error;
+ if (!err && -rt->dst.error)
+ err = rt->dst.error;
} else {
int oif = 0;
if (rta[RTA_OIF - 1])
@@ -1583,7 +1583,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
skb->dev = NULL;
if (err)
goto out_free;
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
@@ -1632,10 +1632,10 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock_bh();
for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt;
- rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
+ rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
if (idx < s_idx)
continue;
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set(skb, dst_clone(&rt->dst));
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
@@ -1678,7 +1678,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
{
struct dn_rt_cache_iter_state *s = seq->private;
- rt = rt->u.dst.dn_next;
+ rt = rt->dst.dn_next;
while(!rt) {
rcu_read_unlock_bh();
if (--s->bucket < 0)
@@ -1719,12 +1719,12 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
- rt->u.dst.dev ? rt->u.dst.dev->name : "*",
+ rt->dst.dev ? rt->dst.dev->name : "*",
dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
- atomic_read(&rt->u.dst.__refcnt),
- rt->u.dst.__use,
- (int) dst_metric(&rt->u.dst, RTAX_RTT));
+ atomic_read(&rt->dst.__refcnt),
+ rt->dst.__use,
+ (int) dst_metric(&rt->dst, RTAX_RTT));
return 0;
}
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 2a5a8053e000..dc54bd0d083b 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -48,7 +48,7 @@
static const struct proto_ops econet_ops;
static struct hlist_head econet_sklist;
-static DEFINE_RWLOCK(econet_lock);
+static DEFINE_SPINLOCK(econet_lock);
static DEFINE_MUTEX(econet_mutex);
/* Since there are only 256 possible network numbers (or fewer, depends
@@ -98,16 +98,16 @@ struct ec_cb
static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
{
- write_lock_bh(&econet_lock);
+ spin_lock_bh(&econet_lock);
sk_del_node_init(sk);
- write_unlock_bh(&econet_lock);
+ spin_unlock_bh(&econet_lock);
}
static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
{
- write_lock_bh(&econet_lock);
+ spin_lock_bh(&econet_lock);
sk_add_node(sk, list);
- write_unlock_bh(&econet_lock);
+ spin_unlock_bh(&econet_lock);
}
/*
@@ -782,15 +782,19 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char
struct sock *sk;
struct hlist_node *node;
+ spin_lock(&econet_lock);
sk_for_each(sk, node, &econet_sklist) {
struct econet_sock *opt = ec_sk(sk);
if ((opt->port == port || opt->port == 0) &&
(opt->station == station || opt->station == 0) &&
- (opt->net == net || opt->net == 0))
+ (opt->net == net || opt->net == 0)) {
+ sock_hold(sk);
goto found;
+ }
}
sk = NULL;
found:
+ spin_unlock(&econet_lock);
return sk;
}
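
Sketch of the general pattern the econet hunk above adopts (the helper and match callback are invented): take a reference on the matching socket before dropping the list lock, and make the caller responsible for sock_put().

#include <net/sock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static struct sock *lookup_and_hold(struct hlist_head *list, spinlock_t *lock,
				    bool (*match)(const struct sock *sk))
{
	struct sock *sk, *ret = NULL;
	struct hlist_node *node;

	spin_lock(lock);
	sk_for_each(sk, node, list) {
		if (match(sk)) {
			sock_hold(sk);  /* keep it alive past the unlock */
			ret = sk;
			break;
		}
	}
	spin_unlock(lock);

	return ret;                     /* caller must sock_put(ret) when done */
}
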
@@ -852,7 +856,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
- struct sock *sk;
+ struct sock *sk = NULL;
struct sk_buff *newskb;
struct ec_device *edev = skb->dev->ec_ptr;
@@ -882,10 +886,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
}
aun_send_response(ip->saddr, ah->handle, 3, 0);
+ sock_put(sk);
return;
bad:
aun_send_response(ip->saddr, ah->handle, 4, 0);
+ if (sk)
+ sock_put(sk);
}
/*
@@ -1050,7 +1057,7 @@ release:
static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
struct ec_framehdr *hdr;
- struct sock *sk;
+ struct sock *sk = NULL;
struct ec_device *edev = dev->ec_ptr;
if (!net_eq(dev_net(dev), &init_net))
@@ -1085,10 +1092,12 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
hdr->port))
goto drop;
-
+ sock_put(sk);
return NET_RX_SUCCESS;
drop:
+ if (sk)
+ sock_put(sk);
kfree_skb(skb);
return NET_RX_DROP;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 61ec0329316c..215c83986a9d 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(eth_rebuild_header);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct ethhdr *eth;
- unsigned char *rawp;
skb->dev = dev;
skb_reset_mac_header(skb);
@@ -199,15 +198,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
if (ntohs(eth->h_proto) >= 1536)
return eth->h_proto;
- rawp = skb->data;
-
/*
* This is a magic hack to spot IPX packets. Older Novell breaks
* the protocol design and runs IPX over 802.3 without an 802.2 LLC
* layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
* won't work for fault tolerant netware but does for the rest.
*/
- if (*(unsigned short *)rawp == 0xFFFF)
+ if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
return htons(ETH_P_802_3);
/*
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 551ce564b035..b4c0969137cb 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -355,6 +355,8 @@ lookup_protocol:
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ inet->nodefrag = 0;
+
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
@@ -1100,7 +1102,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
if (err)
return err;
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
new_saddr = rt->rt_src;
@@ -1166,7 +1168,7 @@ int inet_sk_rebuild_header(struct sock *sk)
err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
}
if (!err)
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
else {
/* Routing failed... */
sk->sk_route_caps = 0;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f094b75810db..cf78f41830ca 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -427,7 +427,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
if (ip_route_output_key(net, &rt, &fl) < 0)
return 1;
- if (rt->u.dst.dev != dev) {
+ if (rt->dst.dev != dev) {
NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
flag = 1;
}
@@ -532,7 +532,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
struct in_device *out_dev;
int imi, omi = -1;
- if (rt->u.dst.dev == dev)
+ if (rt->dst.dev == dev)
return 0;
if (!IN_DEV_PROXY_ARP(in_dev))
@@ -545,10 +545,10 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
/* place to check for proxy_arp for routes */
- if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) {
+ out_dev = __in_dev_get_rcu(rt->dst.dev);
+ if (out_dev)
omi = IN_DEV_MEDIUM_ID(out_dev);
- in_dev_put(out_dev);
- }
+
return (omi != imi && omi != -1);
}
@@ -576,7 +576,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
__be32 sip, __be32 tip)
{
/* Private VLAN is only concerned about the same ethernet segment */
- if (rt->u.dst.dev != dev)
+ if (rt->dst.dev != dev)
return 0;
/* Don't reply on self probes (often done by windowz boxes)*/
@@ -741,7 +741,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
static int arp_process(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
struct arphdr *arp;
unsigned char *arp_ptr;
struct rtable *rt;
@@ -890,7 +890,6 @@ static int arp_process(struct sk_buff *skb)
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
} else {
pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
- in_dev_put(in_dev);
return 0;
}
goto out;
@@ -936,8 +935,6 @@ static int arp_process(struct sk_buff *skb)
}
out:
- if (in_dev)
- in_dev_put(in_dev);
consume_skb(skb);
return 0;
}
@@ -1045,7 +1042,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
struct rtable * rt;
if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
return err;
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
ip_rt_put(rt);
if (!dev)
return -EINVAL;
@@ -1152,7 +1149,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
struct rtable * rt;
if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
return err;
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
ip_rt_put(rt);
if (!dev)
return -EINVAL;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index fb2465811b48..fe3daa7f07a9 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -69,7 +69,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
- sk_dst_set(sk, &rt->u.dst);
+ sk_dst_set(sk, &rt->dst);
return(0);
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 382bc768ed56..da14c49284f4 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
}
ip_mc_up(in_dev);
/* fall through */
+ case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEADDR:
/* Send gratuitous ARP to notify of link change */
if (IN_DEV_ARP_NOTIFY(in_dev)) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4f0ed458c883..e830f7a123bd 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -284,7 +284,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
if (no_addr)
goto last_resort;
if (rpf == 1)
- goto e_inval;
+ goto e_rpf;
fl.oif = dev->ifindex;
ret = 0;
@@ -299,7 +299,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
last_resort:
if (rpf)
- goto e_inval;
+ goto e_rpf;
*spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
*itag = 0;
return 0;
@@ -308,6 +308,8 @@ e_inval_res:
fib_res_put(&res);
e_inval:
return -EINVAL;
+e_rpf:
+ return -EXDEV;
}
static inline __be32 sk_extract_addr(struct sockaddr *addr)
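
In the fib_frontend.c hunks above, fib_validate_source() starts returning -EXDEV instead of -EINVAL when a packet fails only the reverse-path filter; the ip_input.c and proc.c hunks further down use that distinct code to bump the new IPReversePathFilter counter rather than folding those drops into the generic error path. The tiny sketch below shows the same idea, one errno per drop reason feeding one counter each; the counter names are made up, and only the -EXDEV and -ENETUNREACH mappings come from this patch.

#include <errno.h>
#include <stdio.h>

/* illustrative counters, not the kernel's SNMP MIB arrays */
static unsigned long drops_rpfilter, drops_noroute, drops_other;

static void account_input_error(int err)
{
	switch (err) {
	case -EXDEV:		/* rejected purely by the reverse-path filter */
		drops_rpfilter++;
		break;
	case -ENETUNREACH:	/* no route to the destination at all */
		drops_noroute++;
		break;
	default:
		drops_other++;
		break;
	}
}

int main(void)
{
	account_input_error(-EXDEV);
	account_input_error(-ENETUNREACH);
	account_input_error(-EINVAL);
	printf("rpfilter=%lu noroute=%lu other=%lu\n",
	       drops_rpfilter, drops_noroute, drops_other);
	return 0;
}
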
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d65e9215bcd7..7569b21a3a2d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -271,7 +271,7 @@ int xrlim_allow(struct dst_entry *dst, int timeout)
static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
int type, int code)
{
- struct dst_entry *dst = &rt->u.dst;
+ struct dst_entry *dst = &rt->dst;
int rc = 1;
if (type > NR_ICMP_TYPES)
@@ -327,7 +327,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct sock *sk;
struct sk_buff *skb;
- sk = icmp_sk(dev_net((*rt)->u.dst.dev));
+ sk = icmp_sk(dev_net((*rt)->dst.dev));
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
@@ -359,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
- struct net *net = dev_net(rt->u.dst.dev);
+ struct net *net = dev_net(rt->dst.dev);
struct sock *sk;
struct inet_sock *inet;
__be32 daddr;
@@ -427,7 +427,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
if (!rt)
goto out;
- net = dev_net(rt->u.dst.dev);
+ net = dev_net(rt->dst.dev);
/*
* Find the original header. It is expected to be valid, of course.
@@ -596,9 +596,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
- RT_TOS(tos), rt2->u.dst.dev);
+ RT_TOS(tos), rt2->dst.dev);
- dst_release(&rt2->u.dst);
+ dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
@@ -610,7 +610,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
rt = rt2;
break;
case -EPERM:
@@ -629,7 +629,7 @@ route_done:
/* RFC says return as much as we can without exceeding 576 bytes. */
- room = dst_mtu(&rt->u.dst);
+ room = dst_mtu(&rt->dst);
if (room > 576)
room = 576;
room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
@@ -925,6 +925,7 @@ static void icmp_address(struct sk_buff *skb)
/*
* RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain
* loudly if an inconsistency is found.
+ * called with rcu_read_lock()
*/
static void icmp_address_reply(struct sk_buff *skb)
@@ -935,12 +936,12 @@ static void icmp_address_reply(struct sk_buff *skb)
struct in_ifaddr *ifa;
if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
- goto out;
+ return;
- in_dev = in_dev_get(dev);
+ in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
- goto out;
- rcu_read_lock();
+ return;
+
if (in_dev->ifa_list &&
IN_DEV_LOG_MARTIANS(in_dev) &&
IN_DEV_FORWARD(in_dev)) {
@@ -958,9 +959,6 @@ static void icmp_address_reply(struct sk_buff *skb)
mp, dev->name, &rt->rt_src);
}
}
- rcu_read_unlock();
- in_dev_put(in_dev);
-out:;
}
static void icmp_discard(struct sk_buff *skb)
@@ -974,7 +972,7 @@ int icmp_rcv(struct sk_buff *skb)
{
struct icmphdr *icmph;
struct rtable *rt = skb_rtable(skb);
- struct net *net = dev_net(rt->u.dst.dev);
+ struct net *net = dev_net(rt->dst.dev);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5fff865a4fa7..b5580d422994 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -312,7 +312,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
return NULL;
}
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
skb->dev = dev;
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -330,7 +330,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
pip->saddr = rt->rt_src;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
- ip_select_ident(pip, &rt->u.dst, NULL);
+ ip_select_ident(pip, &rt->dst, NULL);
((u8*)&pip[1])[0] = IPOPT_RA;
((u8*)&pip[1])[1] = 4;
((u8*)&pip[1])[2] = 0;
@@ -660,7 +660,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
return -1;
}
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -676,7 +676,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
iph->daddr = dst;
iph->saddr = rt->rt_src;
iph->protocol = IPPROTO_IGMP;
- ip_select_ident(iph, &rt->u.dst, NULL);
+ ip_select_ident(iph, &rt->dst, NULL);
((u8*)&iph[1])[0] = IPOPT_RA;
((u8*)&iph[1])[1] = 4;
((u8*)&iph[1])[2] = 0;
@@ -916,18 +916,19 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
read_unlock(&in_dev->mc_list_lock);
}
+/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
/* This basically follows the spec line by line -- see RFC1112 */
struct igmphdr *ih;
- struct in_device *in_dev = in_dev_get(skb->dev);
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
int len = skb->len;
if (in_dev == NULL)
goto drop;
if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
- goto drop_ref;
+ goto drop;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
@@ -937,7 +938,7 @@ int igmp_rcv(struct sk_buff *skb)
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
- goto drop_ref;
+ goto drop;
}
ih = igmp_hdr(skb);
@@ -957,7 +958,6 @@ int igmp_rcv(struct sk_buff *skb)
break;
case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
- in_dev_put(in_dev);
return pim_rcv_v1(skb);
#endif
case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -971,8 +971,6 @@ int igmp_rcv(struct sk_buff *skb)
break;
}
-drop_ref:
- in_dev_put(in_dev);
drop:
kfree_skb(skb);
return 0;
@@ -1427,7 +1425,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
}
if (!dev && !ip_route_output_key(net, &rt, &fl)) {
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
ip_rt_put(rt);
}
if (dev) {
@@ -1646,8 +1644,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
if (!dpsf) {
- dpsf = (struct ip_sf_list *)
- kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+ dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
if (!dpsf)
continue;
*dpsf = *psf;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 70eb3507c406..57c9e4d7b805 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -383,7 +383,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
goto no_route;
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto route_err;
- return &rt->u.dst;
+ return &rt->dst;
route_err:
ip_rt_put(rt);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 6bcfe52a9c87..9ffa24b9a804 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -51,8 +51,8 @@
* lookups performed with disabled BHs.
*
* Serialisation issues.
- * 1. Nodes may appear in the tree only with the pool write lock held.
- * 2. Nodes may disappear from the tree only with the pool write lock held
+ * 1. Nodes may appear in the tree only with the pool lock held.
+ * 2. Nodes may disappear from the tree only with the pool lock held
* AND reference count being 0.
* 3. Nodes appears and disappears from unused node list only under
* "inet_peer_unused_lock".
@@ -64,23 +64,31 @@
* usually under some other lock to prevent node disappearing
* dtime: unused node list lock
* v4daddr: unchangeable
- * ip_id_count: idlock
+ * ip_id_count: atomic value (no lock needed)
*/
static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
-static struct inet_peer peer_fake_node = {
- .avl_left = &peer_fake_node,
- .avl_right = &peer_fake_node,
+
+#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+static const struct inet_peer peer_fake_node = {
+ .avl_left = peer_avl_empty,
+ .avl_right = peer_avl_empty,
.avl_height = 0
};
-#define peer_avl_empty (&peer_fake_node)
-static struct inet_peer *peer_root = peer_avl_empty;
-static DEFINE_RWLOCK(peer_pool_lock);
+
+static struct {
+ struct inet_peer *root;
+ spinlock_t lock;
+ int total;
+} peers = {
+ .root = peer_avl_empty,
+ .lock = __SPIN_LOCK_UNLOCKED(peers.lock),
+ .total = 0,
+};
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
-static int peer_total;
/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more
* aggressively at this stage */
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
-static LIST_HEAD(unused_peers);
-static DEFINE_SPINLOCK(inet_peer_unused_lock);
+static struct {
+ struct list_head list;
+ spinlock_t lock;
+} unused_peers = {
+ .list = LIST_HEAD_INIT(unused_peers.list),
+ .lock = __SPIN_LOCK_UNLOCKED(unused_peers.lock),
+};
static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -116,7 +129,7 @@ void __init inet_initpeers(void)
peer_cachep = kmem_cache_create("inet_peer_cache",
sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
/* All the timers, started at system startup tend
@@ -131,38 +144,69 @@ void __init inet_initpeers(void)
/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
- spin_lock_bh(&inet_peer_unused_lock);
- list_del_init(&p->unused);
- spin_unlock_bh(&inet_peer_unused_lock);
+ if (!list_empty(&p->unused)) {
+ spin_lock_bh(&unused_peers.lock);
+ list_del_init(&p->unused);
+ spin_unlock_bh(&unused_peers.lock);
+ }
}
/*
* Called with local BH disabled and the pool lock held.
- * _stack is known to be NULL or not at compile time,
- * so compiler will optimize the if (_stack) tests.
*/
#define lookup(_daddr, _stack) \
({ \
struct inet_peer *u, **v; \
- if (_stack != NULL) { \
- stackptr = _stack; \
- *stackptr++ = &peer_root; \
- } \
- for (u = peer_root; u != peer_avl_empty; ) { \
+ \
+ stackptr = _stack; \
+ *stackptr++ = &peers.root; \
+ for (u = peers.root; u != peer_avl_empty; ) { \
if (_daddr == u->v4daddr) \
break; \
if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
v = &u->avl_left; \
else \
v = &u->avl_right; \
- if (_stack != NULL) \
- *stackptr++ = v; \
+ *stackptr++ = v; \
u = *v; \
} \
u; \
})
-/* Called with local BH disabled and the pool write lock held. */
+/*
+ * Called with rcu_read_lock_bh()
+ * Because we hold no lock against a writer, it's quite possible we fall
+ * into an endless loop.
+ * But every pointer we follow is guaranteed to be valid thanks to RCU.
+ * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
+ */
+static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+{
+ struct inet_peer *u = rcu_dereference_bh(peers.root);
+ int count = 0;
+
+ while (u != peer_avl_empty) {
+ if (daddr == u->v4daddr) {
+ /* Before taking a reference, check if this entry was
+ * deleted: unlink_from_pool() sets refcnt=-1 to make the
+ * distinction between an unused entry (refcnt=0) and
+ * a freed one.
+ */
+ if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+ u = NULL;
+ return u;
+ }
+ if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+ u = rcu_dereference_bh(u->avl_left);
+ else
+ u = rcu_dereference_bh(u->avl_right);
+ if (unlikely(++count == PEER_MAXDEPTH))
+ break;
+ }
+ return NULL;
+}
+
+/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start) \
({ \
struct inet_peer *u, **v; \
@@ -176,9 +220,10 @@ static void unlink_from_unused(struct inet_peer *p)
u; \
})
-/* Called with local BH disabled and the pool write lock held.
+/* Called with local BH disabled and the pool lock held.
* Variable names are the proof of operation correctness.
- * Look into mm/map_avl.c for more detail description of the ideas. */
+ * Look into mm/map_avl.c for more detail description of the ideas.
+ */
static void peer_avl_rebalance(struct inet_peer **stack[],
struct inet_peer ***stackend)
{
@@ -254,15 +299,21 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
}
}
-/* Called with local BH disabled and the pool write lock held. */
+/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n) \
do { \
n->avl_height = 1; \
n->avl_left = peer_avl_empty; \
n->avl_right = peer_avl_empty; \
+ smp_wmb(); /* lockless readers can catch us now */ \
**--stackptr = n; \
peer_avl_rebalance(stack, stackptr); \
-} while(0)
+} while (0)
+
+static void inetpeer_free_rcu(struct rcu_head *head)
+{
+ kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
+}
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
@@ -271,13 +322,14 @@ static void unlink_from_pool(struct inet_peer *p)
do_free = 0;
- write_lock_bh(&peer_pool_lock);
+ spin_lock_bh(&peers.lock);
/* Check the reference counter. It was artificially incremented by 1
- * in cleanup() function to prevent sudden disappearing. If the
- * reference count is still 1 then the node is referenced only as `p'
- * here and from the pool. So under the exclusive pool lock it's safe
- * to remove the node and free it later. */
- if (atomic_read(&p->refcnt) == 1) {
+ * in cleanup() function to prevent sudden disappearing. If we can
+ * atomically (because of lockless readers) take this last reference,
+ * it's safe to remove the node and free it later.
+ * We use refcnt=-1 to alert lockless readers this entry is deleted.
+ */
+ if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
struct inet_peer **stack[PEER_MAXDEPTH];
struct inet_peer ***stackptr, ***delp;
if (lookup(p->v4daddr, stack) != p)
@@ -303,20 +355,21 @@ static void unlink_from_pool(struct inet_peer *p)
delp[1] = &t->avl_left; /* was &p->avl_left */
}
peer_avl_rebalance(stack, stackptr);
- peer_total--;
+ peers.total--;
do_free = 1;
}
- write_unlock_bh(&peer_pool_lock);
+ spin_unlock_bh(&peers.lock);
if (do_free)
- kmem_cache_free(peer_cachep, p);
+ call_rcu_bh(&p->rcu, inetpeer_free_rcu);
else
/* The node is used again. Decrease the reference counter
* back. The loop "cleanup -> unlink_from_unused
* -> unlink_from_pool -> putpeer -> link_to_unused
* -> cleanup (for the same node)"
* doesn't really exist because the entry will have a
- * recent deletion time and will not be cleaned again soon. */
+ * recent deletion time and will not be cleaned again soon.
+ */
inet_putpeer(p);
}
@@ -326,16 +379,16 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *p = NULL;
/* Remove the first entry from the list of unused nodes. */
- spin_lock_bh(&inet_peer_unused_lock);
- if (!list_empty(&unused_peers)) {
+ spin_lock_bh(&unused_peers.lock);
+ if (!list_empty(&unused_peers.list)) {
__u32 delta;
- p = list_first_entry(&unused_peers, struct inet_peer, unused);
+ p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
delta = (__u32)jiffies - p->dtime;
if (delta < ttl) {
/* Do not prune fresh entries. */
- spin_unlock_bh(&inet_peer_unused_lock);
+ spin_unlock_bh(&unused_peers.lock);
return -1;
}
@@ -345,7 +398,7 @@ static int cleanup_once(unsigned long ttl)
* before unlink_from_pool() call. */
atomic_inc(&p->refcnt);
}
- spin_unlock_bh(&inet_peer_unused_lock);
+ spin_unlock_bh(&unused_peers.lock);
if (p == NULL)
/* It means that the total number of USED entries has
@@ -360,62 +413,56 @@ static int cleanup_once(unsigned long ttl)
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
- struct inet_peer *p, *n;
+ struct inet_peer *p;
struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
- /* Look up for the address quickly. */
- read_lock_bh(&peer_pool_lock);
- p = lookup(daddr, NULL);
- if (p != peer_avl_empty)
- atomic_inc(&p->refcnt);
- read_unlock_bh(&peer_pool_lock);
+ /* Look up for the address quickly, lockless.
+ * Because of a concurrent writer, we might not find an existing entry.
+ */
+ rcu_read_lock_bh();
+ p = lookup_rcu_bh(daddr);
+ rcu_read_unlock_bh();
+
+ if (p) {
+ /* The existing node has been found.
+ * Remove the entry from unused list if it was there.
+ */
+ unlink_from_unused(p);
+ return p;
+ }
+ /* retry an exact lookup, this time taking the lock.
+ * At least the nodes should now be hot in our cache.
+ */
+ spin_lock_bh(&peers.lock);
+ p = lookup(daddr, stack);
if (p != peer_avl_empty) {
- /* The existing node has been found. */
+ atomic_inc(&p->refcnt);
+ spin_unlock_bh(&peers.lock);
/* Remove the entry from unused list if it was there. */
unlink_from_unused(p);
return p;
}
+ p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
+ if (p) {
+ p->v4daddr = daddr;
+ atomic_set(&p->refcnt, 1);
+ atomic_set(&p->rid, 0);
+ atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+ p->tcp_ts_stamp = 0;
+ INIT_LIST_HEAD(&p->unused);
+
+
+ /* Link the node. */
+ link_to_pool(p);
+ peers.total++;
+ }
+ spin_unlock_bh(&peers.lock);
- if (!create)
- return NULL;
-
- /* Allocate the space outside the locked region. */
- n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
- if (n == NULL)
- return NULL;
- n->v4daddr = daddr;
- atomic_set(&n->refcnt, 1);
- atomic_set(&n->rid, 0);
- atomic_set(&n->ip_id_count, secure_ip_id(daddr));
- n->tcp_ts_stamp = 0;
-
- write_lock_bh(&peer_pool_lock);
- /* Check if an entry has suddenly appeared. */
- p = lookup(daddr, stack);
- if (p != peer_avl_empty)
- goto out_free;
-
- /* Link the node. */
- link_to_pool(n);
- INIT_LIST_HEAD(&n->unused);
- peer_total++;
- write_unlock_bh(&peer_pool_lock);
-
- if (peer_total >= inet_peer_threshold)
+ if (peers.total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
cleanup_once(0);
- return n;
-
-out_free:
- /* The appropriate node is already in the pool. */
- atomic_inc(&p->refcnt);
- write_unlock_bh(&peer_pool_lock);
- /* Remove the entry from unused list if it was there. */
- unlink_from_unused(p);
- /* Free preallocated the preallocated node. */
- kmem_cache_free(peer_cachep, n);
return p;
}
@@ -425,12 +472,12 @@ static void peer_check_expire(unsigned long dummy)
unsigned long now = jiffies;
int ttl;
- if (peer_total >= inet_peer_threshold)
+ if (peers.total >= inet_peer_threshold)
ttl = inet_peer_minttl;
else
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
- peer_total / inet_peer_threshold * HZ;
+ peers.total / inet_peer_threshold * HZ;
while (!cleanup_once(ttl)) {
if (jiffies != now)
break;
@@ -439,22 +486,25 @@ static void peer_check_expire(unsigned long dummy)
/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
* interval depending on the total number of entries (more entries,
* less interval). */
- if (peer_total >= inet_peer_threshold)
+ if (peers.total >= inet_peer_threshold)
peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
else
peer_periodic_timer.expires = jiffies
+ inet_peer_gc_maxtime
- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
- peer_total / inet_peer_threshold * HZ;
+ peers.total / inet_peer_threshold * HZ;
add_timer(&peer_periodic_timer);
}
void inet_putpeer(struct inet_peer *p)
{
- spin_lock_bh(&inet_peer_unused_lock);
- if (atomic_dec_and_test(&p->refcnt)) {
- list_add_tail(&p->unused, &unused_peers);
+ local_bh_disable();
+
+ if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
+ list_add_tail(&p->unused, &unused_peers.list);
p->dtime = (__u32)jiffies;
+ spin_unlock(&unused_peers.lock);
}
- spin_unlock_bh(&inet_peer_unused_lock);
+
+ local_bh_enable();
}
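
The inetpeer.c conversion above replaces the pool rwlock with a writer spinlock plus a lockless RCU lookup, and the whole scheme hinges on how the refcount is used: unlink_from_pool() claims the last reference with atomic_cmpxchg(&p->refcnt, 1, -1), so -1 acts as a "being freed" tombstone, and lookup_rcu_bh() only takes a reference via atomic_add_unless(&u->refcnt, 1, -1), which refuses to resurrect an entry already queued for call_rcu_bh(). The C11 sketch below reimplements just those two steps with compare-exchange loops to show the invariant; it is a standalone illustration, not the kernel's atomic API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference unless the counter already holds the tombstone value.
 * Mirrors atomic_add_unless(&refcnt, 1, -1) in the lookup above. */
static bool ref_get_unless_dead(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	do {
		if (old == -1)		/* entry is being freed: don't touch it */
			return false;
	} while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
	return true;
}

/* Claim the last reference and mark the entry dead in one step.
 * Mirrors atomic_cmpxchg(&p->refcnt, 1, -1) in unlink_from_pool(). */
static bool ref_kill_if_last(atomic_int *refcnt)
{
	int expected = 1;

	return atomic_compare_exchange_strong(refcnt, &expected, -1);
}

int main(void)
{
	atomic_int refcnt = 1;	/* one reference held by the cleanup path */

	printf("get before kill: %d\n", ref_get_unless_dead(&refcnt));	/* 1, refcnt=2 */
	printf("kill while used: %d\n", ref_kill_if_last(&refcnt));	/* 0, still referenced */
	atomic_fetch_sub(&refcnt, 1);					/* drop the extra ref */
	printf("kill when last : %d\n", ref_kill_if_last(&refcnt));	/* 1, refcnt=-1 */
	printf("get after kill : %d\n", ref_get_unless_dead(&refcnt));	/* 0, tombstoned */
	return 0;
}

The same rule, take a reference only while the object is provably live, shows up again in the CLUSTERIP hunks later in this patch.
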
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 56cdf68a074c..99461f09320f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -87,16 +87,16 @@ int ip_forward(struct sk_buff *skb)
if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto sr_failed;
- if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
+ if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
(ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
- IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(dst_mtu(&rt->u.dst)));
+ htonl(dst_mtu(&rt->dst)));
goto drop;
}
/* We are about to mangle packet. Copy it! */
- if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
+ if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
goto drop;
iph = ip_hdr(skb);
@@ -113,7 +113,7 @@ int ip_forward(struct sk_buff *skb)
skb->priority = rt_tos2priority(iph->tos);
return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
- rt->u.dst.dev, ip_forward_finish);
+ rt->dst.dev, ip_forward_finish);
sr_failed:
/*
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 75347ea70ea0..858d34648eee 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -124,11 +124,8 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
}
/* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct netns_frags *nf,
- struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
- if (work)
- *work -= skb->truesize;
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
}
@@ -309,7 +306,7 @@ static int ip_frag_reinit(struct ipq *qp)
fp = qp->q.fragments;
do {
struct sk_buff *xp = fp->next;
- frag_kfree_skb(qp->q.net, fp, NULL);
+ frag_kfree_skb(qp->q.net, fp);
fp = xp;
} while (fp);
@@ -446,7 +443,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
qp->q.fragments = next;
qp->q.meat -= free_it->len;
- frag_kfree_skb(qp->q.net, free_it, NULL);
+ frag_kfree_skb(qp->q.net, free_it);
}
}
@@ -556,7 +553,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- atomic_sub(head->truesize, &qp->q.net->mem);
for (fp=head->next; fp; fp = fp->next) {
head->data_len += fp->len;
@@ -566,8 +562,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
- atomic_sub(fp->truesize, &qp->q.net->mem);
}
+ atomic_sub(head->truesize, &qp->q.net->mem);
head->next = NULL;
head->dev = dev;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 32618e11076d..749e54889e82 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -745,7 +745,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
goto tx_error;
}
}
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
@@ -755,7 +755,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
df = tiph->frag_off;
if (df)
- mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen;
+ mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
@@ -803,7 +803,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
tunnel->err_count = 0;
}
- max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
+ max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -830,7 +830,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/*
* Push down and install the IPIP header.
@@ -853,7 +853,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
else
- iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
+ iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
}
((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -915,7 +915,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
.proto = IPPROTO_GRE };
struct rtable *rt;
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
ip_rt_put(rt);
}
@@ -1174,7 +1174,7 @@ static int ipgre_open(struct net_device *dev)
struct rtable *rt;
if (ip_route_output_key(dev_net(dev), &rt, &fl))
return -EADDRNOTAVAIL;
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
ip_rt_put(rt);
if (__in_dev_get_rtnl(dev) == NULL)
return -EADDRNOTAVAIL;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d930dc5e4d85..db47a5a00ed2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,7 +146,7 @@
#include <linux/netlink.h>
/*
- * Process Router Attention IP option
+ * Process Router Attention IP option (RFC 2113)
*/
int ip_call_ra_chain(struct sk_buff *skb)
{
@@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
struct sock *last = NULL;
struct net_device *dev = skb->dev;
- read_lock(&ip_ra_lock);
- for (ra = ip_ra_chain; ra; ra = ra->next) {
+ for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
struct sock *sk = ra->sk;
/* If socket is bound to an interface, only report
@@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
sk->sk_bound_dev_if == dev->ifindex) &&
net_eq(sock_net(sk), dev_net(dev))) {
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
- if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
- read_unlock(&ip_ra_lock);
+ if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
return 1;
- }
}
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
if (last) {
raw_rcv(last, skb);
- read_unlock(&ip_ra_lock);
return 1;
}
- read_unlock(&ip_ra_lock);
return 0;
}
@@ -298,18 +293,16 @@ static inline int ip_rcv_options(struct sk_buff *skb)
}
if (unlikely(opt->srr)) {
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
if (in_dev) {
if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
if (IN_DEV_LOG_MARTIANS(in_dev) &&
net_ratelimit())
printk(KERN_INFO "source route option %pI4 -> %pI4\n",
&iph->saddr, &iph->daddr);
- in_dev_put(in_dev);
goto drop;
}
-
- in_dev_put(in_dev);
}
if (ip_options_rcv_srr(skb))
@@ -340,6 +333,9 @@ static int ip_rcv_finish(struct sk_buff *skb)
else if (err == -ENETUNREACH)
IP_INC_STATS_BH(dev_net(skb->dev),
IPSTATS_MIB_INNOROUTES);
+ else if (err == -EXDEV)
+ NET_INC_STATS_BH(dev_net(skb->dev),
+ LINUX_MIB_IPRPFILTER);
goto drop;
}
}
@@ -360,10 +356,10 @@ static int ip_rcv_finish(struct sk_buff *skb)
rt = skb_rtable(skb);
if (rt->rt_type == RTN_MULTICAST) {
- IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST,
+ IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
- IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST,
+ IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
skb->len);
return dst_input(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 041d41df1224..7d1f4b4481a9 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -151,15 +151,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
- if (ip_dont_fragment(sk, &rt->u.dst))
+ if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
- iph->ttl = ip_select_ttl(inet, &rt->u.dst);
+ iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
iph->protocol = sk->sk_protocol;
- ip_select_ident(iph, &rt->u.dst, sk);
+ ip_select_ident(iph, &rt->dst, sk);
if (opt && opt->optlen) {
iph->ihl += opt->optlen>>2;
@@ -240,7 +240,7 @@ int ip_mc_output(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct rtable *rt = skb_rtable(skb);
- struct net_device *dev = rt->u.dst.dev;
+ struct net_device *dev = rt->dst.dev;
/*
* If the indicated interface is up and running, send the packet.
@@ -359,9 +359,9 @@ int ip_queue_xmit(struct sk_buff *skb)
if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
goto no_route;
}
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
}
- skb_dst_set_noref(skb, &rt->u.dst);
+ skb_dst_set_noref(skb, &rt->dst);
packet_routed:
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -372,11 +372,11 @@ packet_routed:
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
- if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
+ if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
- iph->ttl = ip_select_ttl(inet, &rt->u.dst);
+ iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
@@ -387,7 +387,7 @@ packet_routed:
ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
}
- ip_select_ident_more(iph, &rt->u.dst, sk,
+ ip_select_ident_more(iph, &rt->dst, sk,
(skb_shinfo(skb)->gso_segs ?: 1) - 1);
skb->priority = sk->sk_priority;
@@ -452,7 +452,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
struct rtable *rt = skb_rtable(skb);
int err = 0;
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
/*
* Point into the IP datagram header.
@@ -473,7 +473,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
*/
hlen = iph->ihl * 4;
- mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */
+ mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
@@ -586,7 +586,7 @@ slow_path:
* we need to make room for the encapsulating header
*/
pad = nf_bridge_pad(skb);
- ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
+ ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad);
mtu -= pad;
/*
@@ -833,13 +833,13 @@ int ip_append_data(struct sock *sk,
*/
*rtp = NULL;
inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
- rt->u.dst.dev->mtu :
- dst_mtu(rt->u.dst.path);
- inet->cork.dst = &rt->u.dst;
+ rt->dst.dev->mtu :
+ dst_mtu(rt->dst.path);
+ inet->cork.dst = &rt->dst;
inet->cork.length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- if ((exthdrlen = rt->u.dst.header_len) != 0) {
+ if ((exthdrlen = rt->dst.header_len) != 0) {
length += exthdrlen;
transhdrlen += exthdrlen;
}
@@ -852,7 +852,7 @@ int ip_append_data(struct sock *sk,
exthdrlen = 0;
mtu = inet->cork.fragsize;
}
- hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
@@ -869,7 +869,7 @@ int ip_append_data(struct sock *sk,
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
- rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
+ rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
@@ -878,7 +878,7 @@ int ip_append_data(struct sock *sk,
inet->cork.length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ (rt->dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
fragheaderlen, transhdrlen, mtu,
flags);
@@ -926,7 +926,7 @@ alloc_new_skb:
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
- !(rt->u.dst.dev->features&NETIF_F_SG))
+ !(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = datalen + fragheaderlen;
@@ -937,7 +937,7 @@ alloc_new_skb:
* the last.
*/
if (datalen == length + fraggap)
- alloclen += rt->u.dst.trailer_len;
+ alloclen += rt->dst.trailer_len;
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
@@ -1010,7 +1010,7 @@ alloc_new_skb:
if (copy > length)
copy = length;
- if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
+ if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
@@ -1105,10 +1105,10 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
if (inet->cork.flags & IPCORK_OPT)
opt = inet->cork.opt;
- if (!(rt->u.dst.dev->features&NETIF_F_SG))
+ if (!(rt->dst.dev->features&NETIF_F_SG))
return -EOPNOTSUPP;
- hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
mtu = inet->cork.fragsize;
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
@@ -1125,7 +1125,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
inet->cork.length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ (rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
@@ -1277,8 +1277,8 @@ int ip_push_pending_frames(struct sock *sk)
* If local_df is set too, we still allow to fragment this frame
* locally. */
if (inet->pmtudisc >= IP_PMTUDISC_DO ||
- (skb->len <= dst_mtu(&rt->u.dst) &&
- ip_dont_fragment(sk, &rt->u.dst)))
+ (skb->len <= dst_mtu(&rt->dst) &&
+ ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (inet->cork.flags & IPCORK_OPT)
@@ -1287,7 +1287,7 @@ int ip_push_pending_frames(struct sock *sk)
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
- ttl = ip_select_ttl(inet, &rt->u.dst);
+ ttl = ip_select_ttl(inet, &rt->dst);
iph = (struct iphdr *)skb->data;
iph->version = 4;
@@ -1298,7 +1298,7 @@ int ip_push_pending_frames(struct sock *sk)
}
iph->tos = inet->tos;
iph->frag_off = df;
- ip_select_ident(iph, &rt->u.dst, sk);
+ ip_select_ident(iph, &rt->dst, sk);
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
@@ -1311,7 +1311,7 @@ int ip_push_pending_frames(struct sock *sk)
* on dst refcount
*/
inet->cork.dst = NULL;
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ce231780a2b1..6c40a8c46e79 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -239,7 +239,16 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
sent to multicast group to reach destination designated router.
*/
struct ip_ra_chain *ip_ra_chain;
-DEFINE_RWLOCK(ip_ra_lock);
+static DEFINE_SPINLOCK(ip_ra_lock);
+
+
+static void ip_ra_destroy_rcu(struct rcu_head *head)
+{
+ struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
+
+ sock_put(ra->saved_sk);
+ kfree(ra);
+}
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
@@ -251,35 +260,42 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
- write_lock_bh(&ip_ra_lock);
+ spin_lock_bh(&ip_ra_lock);
for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
- write_unlock_bh(&ip_ra_lock);
+ spin_unlock_bh(&ip_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
}
- *rap = ra->next;
- write_unlock_bh(&ip_ra_lock);
+ /* don't let ip_call_ra_chain() use sk again */
+ ra->sk = NULL;
+ rcu_assign_pointer(*rap, ra->next);
+ spin_unlock_bh(&ip_ra_lock);
if (ra->destructor)
ra->destructor(sk);
- sock_put(sk);
- kfree(ra);
+ /*
+ * Delay sock_put(sk) and kfree(ra) until after one rcu grace
+ * period. This guarantees ip_call_ra_chain() doesn't need
+ * to mess with socket refcounts.
+ */
+ ra->saved_sk = sk;
+ call_rcu(&ra->rcu, ip_ra_destroy_rcu);
return 0;
}
}
if (new_ra == NULL) {
- write_unlock_bh(&ip_ra_lock);
+ spin_unlock_bh(&ip_ra_lock);
return -ENOBUFS;
}
new_ra->sk = sk;
new_ra->destructor = destructor;
new_ra->next = ra;
- *rap = new_ra;
+ rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
- write_unlock_bh(&ip_ra_lock);
+ spin_unlock_bh(&ip_ra_lock);
return 0;
}
@@ -449,7 +465,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
- (1<<IP_MINTTL))) ||
+ (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -572,6 +588,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
}
inet->hdrincl = val ? 1 : 0;
break;
+ case IP_NODEFRAG:
+ if (sk->sk_type != SOCK_RAW) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+ inet->nodefrag = val ? 1 : 0;
+ break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
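
The ip_ra_control() rewrite above is the writer side of the ip_call_ra_chain() change earlier in this patch: readers now walk ip_ra_chain under RCU with rcu_dereference(), so the writer publishes entries with rcu_assign_pointer() and, on removal, clears ra->sk, unlinks the node and defers sock_put()/kfree() to an RCU callback, so a reader still holding the old pointer never touches a freed socket. The userspace sketch below mimics only that writer-side shape: unlink under a lock, then park the node on a "free later" list standing in for call_rcu(). There is no cheap userspace grace period here, so deferred nodes are simply reclaimed at program exit, and all structure and function names are invented for the illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ra_entry {
	int sk;				/* stands in for the struct sock pointer */
	struct ra_entry *next;
	struct ra_entry *free_next;	/* stands in for struct rcu_head */
};

static struct ra_entry *ra_chain;		/* read by lockless readers */
static struct ra_entry *defer_free_list;	/* nodes awaiting a "grace period" */
static pthread_mutex_t ra_lock = PTHREAD_MUTEX_INITIALIZER;

/* writer: insert at head; rcu_assign_pointer() would publish it in the kernel */
static void ra_add(int sk)
{
	struct ra_entry *ra = malloc(sizeof(*ra));

	if (!ra)
		return;
	ra->sk = sk;
	pthread_mutex_lock(&ra_lock);
	ra->next = ra_chain;
	ra_chain = ra;
	pthread_mutex_unlock(&ra_lock);
}

/* writer: unlink, then defer reclamation instead of freeing immediately,
 * so a concurrent reader that already holds the pointer stays safe */
static void ra_del(int sk)
{
	pthread_mutex_lock(&ra_lock);
	for (struct ra_entry **rap = &ra_chain; *rap; rap = &(*rap)->next) {
		struct ra_entry *ra = *rap;

		if (ra->sk == sk) {
			ra->sk = -1;		/* don't let readers use sk again */
			*rap = ra->next;	/* rcu_assign_pointer() in the kernel */
			ra->free_next = defer_free_list;
			defer_free_list = ra;	/* call_rcu() in the kernel */
			break;
		}
	}
	pthread_mutex_unlock(&ra_lock);
}

int main(void)
{
	ra_add(1);
	ra_add(2);
	ra_del(1);

	for (struct ra_entry *ra = ra_chain; ra; ra = ra->next)
		printf("active entry sk=%d\n", ra->sk);

	/* "grace period" over: reclaim deferred nodes, then the live chain */
	while (defer_free_list) {
		struct ra_entry *ra = defer_free_list;
		defer_free_list = ra->free_next;
		free(ra);
	}
	while (ra_chain) {
		struct ra_entry *ra = ra_chain;
		ra_chain = ra->next;
		free(ra);
	}
	return 0;
}
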
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b9d84e800cf4..3a6e1ec5e9ae 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -665,6 +665,13 @@ ic_dhcp_init_options(u8 *options)
memcpy(e, ic_req_params, sizeof(ic_req_params));
e += sizeof(ic_req_params);
+ if (ic_host_name_set) {
+ *e++ = 12; /* host-name */
+ len = strlen(utsname()->nodename);
+ *e++ = len;
+ memcpy(e, utsname()->nodename, len);
+ e += len;
+ }
if (*vendor_class_identifier) {
printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n",
vendor_class_identifier);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7fd636711037..ec036731a70b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -435,7 +435,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error_icmp;
}
}
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
@@ -446,7 +446,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
df |= old_iph->frag_off & htons(IP_DF);
if (df) {
- mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+ mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
stats->collisions++;
@@ -503,7 +503,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/*
* Push down and install the IPIP header.
@@ -552,7 +552,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
.proto = IPPROTO_IPIP };
struct rtable *rt;
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
ip_rt_put(rt);
}
dev->flags |= IFF_POINTOPOINT;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 757f25eb9b4b..539592294f45 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1553,9 +1553,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
goto out_free;
}
- dev = rt->u.dst.dev;
+ dev = rt->dst.dev;
- if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
+ if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
allow to send ICMP, so that packets will disappear
to blackhole.
@@ -1566,7 +1566,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
goto out_free;
}
- encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
+ encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
@@ -1577,7 +1577,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
vif->bytes_out += skb->len;
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
ip_decrease_ttl(ip_hdr(skb));
/* FIXME: forward and output firewalls used to be called here.
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 07de855e2175..d88a46c54fd1 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -43,7 +43,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
/* Drop old route. */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
} else {
/* non-local src, find valid iif to satisfy
* rp-filter when calling ip_route_input. */
@@ -53,11 +53,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
orefdst = skb->_skb_refdst;
if (ip_route_input(skb, iph->daddr, iph->saddr,
- RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
- dst_release(&rt->u.dst);
+ RT_TOS(iph->tos), rt->dst.dev) != 0) {
+ dst_release(&rt->dst);
return -1;
}
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
refdst_drop(orefdst);
}
@@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
skb->len - dataoff, 0);
skb->ip_summed = CHECKSUM_NONE;
- csum = __skb_checksum_complete_head(skb, dataoff + len);
- if (!csum)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return __skb_checksum_complete_head(skb, dataoff + len);
}
return csum;
}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 1ac01b128621..16c0ba0a2728 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -758,7 +758,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
* about).
*/
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc_node(countersize, numa_node_id());
+ counters = vmalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1005,8 +1005,7 @@ static int __do_replace(struct net *net, const char *name,
struct arpt_entry *iter;
ret = 0;
- counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
- numa_node_id());
+ counters = vmalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
@@ -1159,7 +1158,7 @@ static int do_add_counters(struct net *net, const void __user *user,
if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;
- paddc = vmalloc_node(len - size, numa_node_id());
+ paddc = vmalloc(len - size);
if (!paddc)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5df4bf..d2c1311cb28d 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
break;
case IPQ_COPY_PACKET:
- copy_mode = mode;
+ if (range > 0xFFFF)
+ range = 0xFFFF;
copy_range = range;
- if (copy_range > 0xFFFF)
- copy_range = 0xFFFF;
+ copy_mode = mode;
break;
default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
{
struct nf_queue_entry *entry = NULL, *i;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
list_for_each_entry(i, &queue_list, list) {
if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
queue_total--;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return entry;
}
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
__ipq_flush(cmpfn, data);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
struct nlmsghdr *nlh;
struct timeval tv;
- read_lock_bh(&queue_lock);
-
- switch (copy_mode) {
+ switch (ACCESS_ONCE(copy_mode)) {
case IPQ_COPY_META:
case IPQ_COPY_NONE:
size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
case IPQ_COPY_PACKET:
if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
- (*errp = skb_checksum_help(entry->skb))) {
- read_unlock_bh(&queue_lock);
+ (*errp = skb_checksum_help(entry->skb)))
return NULL;
- }
- if (copy_range == 0 || copy_range > entry->skb->len)
+
+ data_len = ACCESS_ONCE(copy_range);
+ if (data_len == 0 || data_len > entry->skb->len)
data_len = entry->skb->len;
- else
- data_len = copy_range;
size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
break;
default:
*errp = -EINVAL;
- read_unlock_bh(&queue_lock);
return NULL;
}
- read_unlock_bh(&queue_lock);
-
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
if (nskb == NULL)
return status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
__ipq_enqueue_entry(entry);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
{
int status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
status = __ipq_set_mode(mode, range);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
if (security_netlink_recv(skb, CAP_NET_ADMIN))
RCV_SKB_FAIL(-EPERM);
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (peer_pid) {
if (peer_pid != pid) {
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
RCV_SKB_FAIL(-EBUSY);
}
} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
peer_pid = pid;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
struct netlink_notify *n = ptr;
if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
return NOTIFY_DONE;
}
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
- read_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
seq_printf(m,
"Peer PID : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
queue_dropped,
queue_user_dropped);
- read_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return 0;
}
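
The ip_queue.c changes above remove the read side of queue_lock altogether: ipq_build_packet_message() samples copy_mode and copy_range with ACCESS_ONCE(), and __ipq_set_mode() is reordered to clamp and store copy_range before publishing copy_mode, so a reader that observes IPQ_COPY_PACKET also finds a bounded range. The C11 sketch below expresses the same publish/observe pair with an explicit release store and acquire load, which is the portable way to state the ordering the patch relies on; the names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

enum { COPY_NONE, COPY_META, COPY_PACKET };

static atomic_int copy_mode = COPY_NONE;
static atomic_uint copy_range;

/* writer (normally under the queue spinlock): publish range before mode */
static void set_copy_mode(int mode, unsigned int range)
{
	if (range > 0xFFFF)
		range = 0xFFFF;
	atomic_store_explicit(&copy_range, range, memory_order_relaxed);
	atomic_store_explicit(&copy_mode, mode, memory_order_release);
}

/* lockless reader: if it sees COPY_PACKET, the matching range is visible */
static unsigned int packet_copy_len(unsigned int skb_len)
{
	int mode = atomic_load_explicit(&copy_mode, memory_order_acquire);
	unsigned int range;

	if (mode != COPY_PACKET)
		return 0;
	range = atomic_load_explicit(&copy_range, memory_order_relaxed);
	if (range == 0 || range > skb_len)
		range = skb_len;
	return range;
}

int main(void)
{
	set_copy_mode(COPY_PACKET, 0x20000);	/* clamped to 0xFFFF */
	printf("copy %u of a 1500 byte packet\n", packet_copy_len(1500));
	printf("copy %u of a 90000 byte packet\n", packet_copy_len(90000));
	return 0;
}
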
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4b6c5ca610fc..b38c11810c65 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -928,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc_node(countersize, numa_node_id());
+ counters = vmalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1352,7 +1352,7 @@ do_add_counters(struct net *net, const void __user *user,
if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;
- paddc = vmalloc_node(len - size, numa_node_id());
+ paddc = vmalloc(len - size);
if (!paddc)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f91c94b9a790..64d0875f5192 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -53,12 +53,13 @@ struct clusterip_config {
#endif
enum clusterip_hashmode hash_mode; /* which hashing mode */
u_int32_t hash_initval; /* hash initialization */
+ struct rcu_head rcu;
};
static LIST_HEAD(clusterip_configs);
/* clusterip_lock protects the clusterip_configs list */
-static DEFINE_RWLOCK(clusterip_lock);
+static DEFINE_SPINLOCK(clusterip_lock);
#ifdef CONFIG_PROC_FS
static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
atomic_inc(&c->refcount);
}
+
+static void clusterip_config_rcu_free(struct rcu_head *head)
+{
+ kfree(container_of(head, struct clusterip_config, rcu));
+}
+
static inline void
clusterip_config_put(struct clusterip_config *c)
{
if (atomic_dec_and_test(&c->refcount))
- kfree(c);
+ call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
}
/* decrease the count of entries using/referencing this config. If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
static inline void
clusterip_config_entry_put(struct clusterip_config *c)
{
- write_lock_bh(&clusterip_lock);
- if (atomic_dec_and_test(&c->entries)) {
- list_del(&c->list);
- write_unlock_bh(&clusterip_lock);
+ local_bh_disable();
+ if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+ list_del_rcu(&c->list);
+ spin_unlock(&clusterip_lock);
+ local_bh_enable();
dev_mc_del(c->dev, c->clustermac);
dev_put(c->dev);
@@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
#endif
return;
}
- write_unlock_bh(&clusterip_lock);
+ local_bh_enable();
}
static struct clusterip_config *
@@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip)
{
struct clusterip_config *c;
- list_for_each_entry(c, &clusterip_configs, list) {
+ list_for_each_entry_rcu(c, &clusterip_configs, list) {
if (c->clusterip == clusterip)
return c;
}
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
{
struct clusterip_config *c;
- read_lock_bh(&clusterip_lock);
+ rcu_read_lock_bh();
c = __clusterip_config_find(clusterip);
- if (!c) {
- read_unlock_bh(&clusterip_lock);
- return NULL;
+ if (c) {
+ if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+ c = NULL;
+ else if (entry)
+ atomic_inc(&c->entries);
}
- atomic_inc(&c->refcount);
- if (entry)
- atomic_inc(&c->entries);
- read_unlock_bh(&clusterip_lock);
+ rcu_read_unlock_bh();
return c;
}
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
}
#endif
- write_lock_bh(&clusterip_lock);
- list_add(&c->list, &clusterip_configs);
- write_unlock_bh(&clusterip_lock);
+ spin_lock_bh(&clusterip_lock);
+ list_add_rcu(&c->list, &clusterip_configs);
+ spin_unlock_bh(&clusterip_lock);
return c;
}
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
#endif
nf_unregister_hook(&cip_arp_ops);
xt_unregister_target(&clusterip_tg_reg);
+
+ /* Wait for completion of call_rcu_bh() callbacks (clusterip_config_rcu_free) */
+ rcu_barrier_bh();
}
module_init(clusterip_tg_init);
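
clusterip_config_find_get() above now walks clusterip_configs under rcu_read_lock_bh() and can race with the final clusterip_config_put(), so it grabs a reference with atomic_inc_not_zero(): a config whose refcount has already hit zero is reported as not found instead of being revived, and rcu_barrier_bh() in the exit path makes sure every pending clusterip_config_rcu_free() callback has run before the module text goes away. The helper below is a C11 compare-exchange rendition of only the inc-not-zero step (it complements the refcnt=-1 tombstone sketch after the inetpeer.c diff); it is not the kernel's atomic_inc_not_zero().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the object is still live (refcount != 0) */
static bool get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	do {
		if (old == 0)	/* already released: lookup must say "not found" */
			return false;
	} while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
	return true;
}

int main(void)
{
	atomic_int live = 2, dying = 0;
	bool got;

	got = get_unless_zero(&live);
	printf("live  config: got ref=%d, refcnt now %d\n", got, atomic_load(&live));
	got = get_unless_zero(&dying);
	printf("dying config: got ref=%d, refcnt now %d\n", got, atomic_load(&dying));
	return 0;
}
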
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index cb763ae9ed90..eab8de32f200 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,6 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct inet_sock *inet = inet_sk(skb->sk);
+
+ if (inet && inet->nodefrag)
+ return NF_ACCEPT;
+
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
/* Previously seen (loopback)? Ignore. Do this before
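
The ipv4_conntrack_defrag() check above is the consumer of the new per-socket flag wired up earlier in this patch: af_inet.c initialises inet->nodefrag to 0 and do_ip_setsockopt() accepts IP_NODEFRAG on SOCK_RAW sockets only, letting raw-socket traffic opt out of the conntrack defragmenter. A minimal userspace sketch of turning it on follows; the fallback #define is an assumption for older userspace headers (check <linux/in.h> on your tree), and error handling is reduced to perror().

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22	/* assumed value; verify against <linux/in.h> */
#endif

int main(void)
{
	int one = 1;
	/* IP_NODEFRAG is only accepted on raw sockets (see do_ip_setsockopt above) */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0) {
		perror("socket (needs CAP_NET_RAW)");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IP, IP_NODEFRAG, &one, sizeof(one)) < 0)
		perror("setsockopt(IP_NODEFRAG)");
	else
		printf("conntrack defrag will skip packets tied to this socket\n");
	close(fd);
	return 0;
}
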
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4f8bddb760c9..c7719b283ada 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -742,7 +742,7 @@ static int __init nf_nat_init(void)
spin_unlock_bh(&nf_nat_lock);
/* Initialize fake conntrack so that NAT will skip it */
- nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+ nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index beb25819c9c9..6723c682250d 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum,
return NF_ACCEPT;
/* Don't try to NAT if this packet is not conntracked */
- if (ct == &nf_conntrack_untracked)
+ if (nf_ct_is_untracked(ct))
return NF_ACCEPT;
nat = nfct_nat(ct);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3dc9914c1dce..e320ca6b3ef3 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -252,6 +252,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
+ SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2c7a1639388a..009a7b2aa1ef 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -314,7 +314,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
}
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
- struct rtable *rt,
+ struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
@@ -323,25 +323,27 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
struct sk_buff *skb;
unsigned int iphlen;
int err;
+ struct rtable *rt = *rtp;
- if (length > rt->u.dst.dev->mtu) {
+ if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
- rt->u.dst.dev->mtu);
+ rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+ length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+ skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set(skb, &rt->dst);
+ *rtp = NULL;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
@@ -373,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
- ip_select_ident(iph, &rt->u.dst, NULL);
+ ip_select_ident(iph, &rt->dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
@@ -382,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
skb_transport_header(skb))->type);
err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
- rt->u.dst.dev, dst_output);
+ rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -576,7 +578,7 @@ back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, msg->msg_iov, len,
- rt, msg->msg_flags);
+ &rt, msg->msg_flags);
else {
if (!ipc.addr)
@@ -604,7 +606,7 @@ out:
return len;
do_confirm:
- dst_confirm(&rt->u.dst);
+ dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
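
raw_send_hdrinc() above now takes struct rtable ** and, instead of skb_dst_set(skb, dst_clone(&rt->dst)), attaches the route it was given and clears the caller's pointer: the existing reference moves into the skb, so the per-packet dst_clone()/dst_release() pair disappears and the caller cannot drop the route a second time. The short sketch below shows that move-the-reference shape in plain C; the stub types are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>

struct route_stub { int refcnt; };		/* stand-in for struct rtable */
struct skb_stub { struct route_stub *dst; };	/* stand-in for the skb's dst */

/* steal the caller's reference and clear its pointer (no extra get/put) */
static void attach_dst_move(struct skb_stub *skb, struct route_stub **rtp)
{
	skb->dst = *rtp;
	*rtp = NULL;		/* caller no longer owns the route */
}

int main(void)
{
	struct route_stub *rt = calloc(1, sizeof(*rt));
	struct skb_stub skb = { 0 };

	if (!rt)
		return 1;
	rt->refcnt = 1;		/* the reference the caller got from routing */
	attach_dst_move(&skb, &rt);
	printf("caller pointer now %p, skb route refcnt=%d\n",
	       (void *)rt, skb.dst->refcnt);
	free(skb.dst);		/* whoever frees the skb drops the reference */
	return 0;
}
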
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 560acc677ce4..03430de46166 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -253,8 +253,7 @@ static unsigned rt_hash_mask __read_mostly;
static unsigned int rt_hash_log __read_mostly;
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
-#define RT_CACHE_STAT_INC(field) \
- (__raw_get_cpu_var(rt_cache_stat).field++)
+#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
int genid)
@@ -287,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
while (r) {
- if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+ if (dev_net(r->dst.dev) == seq_file_net(seq) &&
r->rt_genid == st->genid)
return r;
- r = rcu_dereference_bh(r->u.dst.rt_next);
+ r = rcu_dereference_bh(r->dst.rt_next);
}
rcu_read_unlock_bh();
}
@@ -302,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
{
struct rt_cache_iter_state *st = seq->private;
- r = r->u.dst.rt_next;
+ r = r->dst.rt_next;
while (!r) {
rcu_read_unlock_bh();
do {
@@ -320,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq,
{
struct rt_cache_iter_state *st = seq->private;
while ((r = __rt_cache_get_next(seq, r)) != NULL) {
- if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+ if (dev_net(r->dst.dev) != seq_file_net(seq))
continue;
if (r->rt_genid == st->genid)
break;
@@ -378,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
"%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
- r->u.dst.dev ? r->u.dst.dev->name : "*",
+ r->dst.dev ? r->dst.dev->name : "*",
(__force u32)r->rt_dst,
(__force u32)r->rt_gateway,
- r->rt_flags, atomic_read(&r->u.dst.__refcnt),
- r->u.dst.__use, 0, (__force u32)r->rt_src,
- (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
- (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
- dst_metric(&r->u.dst, RTAX_WINDOW),
- (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
- dst_metric(&r->u.dst, RTAX_RTTVAR)),
+ r->rt_flags, atomic_read(&r->dst.__refcnt),
+ r->dst.__use, 0, (__force u32)r->rt_src,
+ (dst_metric(&r->dst, RTAX_ADVMSS) ?
+ (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+ dst_metric(&r->dst, RTAX_WINDOW),
+ (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
+ dst_metric(&r->dst, RTAX_RTTVAR)),
r->fl.fl4_tos,
- r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
- r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+ r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
+ r->dst.hh ? (r->dst.hh->hh_output ==
dev_queue_xmit) : 0,
r->rt_spec_dst, &len);
@@ -609,13 +608,13 @@ static inline int ip_rt_proc_init(void)
static inline void rt_free(struct rtable *rt)
{
- call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+ call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static inline void rt_drop(struct rtable *rt)
{
ip_rt_put(rt);
- call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+ call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
static inline int rt_fast_clean(struct rtable *rth)
@@ -623,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth)
/* Kill broadcast/multicast entries very aggresively, if they
collide in hash table with more useful entries */
return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rth->fl.iif && rth->u.dst.rt_next;
+ rth->fl.iif && rth->dst.rt_next;
}
static inline int rt_valuable(struct rtable *rth)
{
return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- rth->u.dst.expires;
+ rth->dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -637,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
unsigned long age;
int ret = 0;
- if (atomic_read(&rth->u.dst.__refcnt))
+ if (atomic_read(&rth->dst.__refcnt))
goto out;
ret = 1;
- if (rth->u.dst.expires &&
- time_after_eq(jiffies, rth->u.dst.expires))
+ if (rth->dst.expires &&
+ time_after_eq(jiffies, rth->dst.expires))
goto out;
- age = jiffies - rth->u.dst.lastuse;
+ age = jiffies - rth->dst.lastuse;
ret = 0;
if ((age <= tmo1 && !rt_fast_clean(rth)) ||
(age <= tmo2 && rt_valuable(rth)))
@@ -661,7 +660,7 @@ out: return ret;
*/
static inline u32 rt_score(struct rtable *rt)
{
- u32 score = jiffies - rt->u.dst.lastuse;
+ u32 score = jiffies - rt->dst.lastuse;
score = ~score & ~(3<<30);
@@ -701,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
- return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
+ return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}
static inline int rt_is_expired(struct rtable *rth)
{
- return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+ return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
/*
@@ -735,7 +734,7 @@ static void rt_do_flush(int process_context)
rth = rt_hash_table[i].chain;
/* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail; tail = tail->u.dst.rt_next)
+ for (tail = rth; tail; tail = tail->dst.rt_next)
if (!rt_is_expired(tail))
break;
if (rth != tail)
@@ -744,9 +743,9 @@ static void rt_do_flush(int process_context)
/* call rt_free on entries after the tail requiring flush */
prev = &rt_hash_table[i].chain;
for (p = *prev; p; p = next) {
- next = p->u.dst.rt_next;
+ next = p->dst.rt_next;
if (!rt_is_expired(p)) {
- prev = &p->u.dst.rt_next;
+ prev = &p->dst.rt_next;
} else {
*prev = next;
rt_free(p);
@@ -761,7 +760,7 @@ static void rt_do_flush(int process_context)
spin_unlock_bh(rt_hash_lock_addr(i));
for (; rth != tail; rth = next) {
- next = rth->u.dst.rt_next;
+ next = rth->dst.rt_next;
rt_free(rth);
}
}
@@ -792,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
while (aux != rth) {
if (compare_hash_inputs(&aux->fl, &rth->fl))
return 0;
- aux = aux->u.dst.rt_next;
+ aux = aux->dst.rt_next;
}
return ONE;
}
@@ -832,18 +831,18 @@ static void rt_check_expire(void)
length = 0;
spin_lock_bh(rt_hash_lock_addr(i));
while ((rth = *rthp) != NULL) {
- prefetch(rth->u.dst.rt_next);
+ prefetch(rth->dst.rt_next);
if (rt_is_expired(rth)) {
- *rthp = rth->u.dst.rt_next;
+ *rthp = rth->dst.rt_next;
rt_free(rth);
continue;
}
- if (rth->u.dst.expires) {
+ if (rth->dst.expires) {
/* Entry is expired even if it is in use */
- if (time_before_eq(jiffies, rth->u.dst.expires)) {
+ if (time_before_eq(jiffies, rth->dst.expires)) {
nofree:
tmo >>= 1;
- rthp = &rth->u.dst.rt_next;
+ rthp = &rth->dst.rt_next;
/*
* We only count entries on
* a chain with equal hash inputs once
@@ -859,7 +858,7 @@ nofree:
goto nofree;
/* Cleanup aged off entries. */
- *rthp = rth->u.dst.rt_next;
+ *rthp = rth->dst.rt_next;
rt_free(rth);
}
spin_unlock_bh(rt_hash_lock_addr(i));
@@ -1000,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops)
if (!rt_is_expired(rth) &&
!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
- rthp = &rth->u.dst.rt_next;
+ rthp = &rth->dst.rt_next;
continue;
}
- *rthp = rth->u.dst.rt_next;
+ *rthp = rth->dst.rt_next;
rt_free(rth);
goal--;
}
@@ -1069,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head)
while (rth) {
length += has_noalias(head, rth);
- rth = rth->u.dst.rt_next;
+ rth = rth->dst.rt_next;
}
return length >> FRACT_BITS;
}
@@ -1091,7 +1090,7 @@ restart:
candp = NULL;
now = jiffies;
- if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ if (!rt_caching(dev_net(rt->dst.dev))) {
/*
* If we're not caching, just tell the caller we
* were successful and don't touch the route. The
@@ -1109,7 +1108,7 @@ restart:
*/
if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
- int err = arp_bind_neighbour(&rt->u.dst);
+ int err = arp_bind_neighbour(&rt->dst);
if (err) {
if (net_ratelimit())
printk(KERN_WARNING
@@ -1128,19 +1127,19 @@ restart:
spin_lock_bh(rt_hash_lock_addr(hash));
while ((rth = *rthp) != NULL) {
if (rt_is_expired(rth)) {
- *rthp = rth->u.dst.rt_next;
+ *rthp = rth->dst.rt_next;
rt_free(rth);
continue;
}
if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
/* Put it first */
- *rthp = rth->u.dst.rt_next;
+ *rthp = rth->dst.rt_next;
/*
* Since lookup is lockfree, the deletion
* must be visible to another weakly ordered CPU before
* the insertion at the start of the hash chain.
*/
- rcu_assign_pointer(rth->u.dst.rt_next,
+ rcu_assign_pointer(rth->dst.rt_next,
rt_hash_table[hash].chain);
/*
* Since lookup is lockfree, the update writes
@@ -1148,18 +1147,18 @@ restart:
*/
rcu_assign_pointer(rt_hash_table[hash].chain, rth);
- dst_use(&rth->u.dst, now);
+ dst_use(&rth->dst, now);
spin_unlock_bh(rt_hash_lock_addr(hash));
rt_drop(rt);
if (rp)
*rp = rth;
else
- skb_dst_set(skb, &rth->u.dst);
+ skb_dst_set(skb, &rth->dst);
return 0;
}
- if (!atomic_read(&rth->u.dst.__refcnt)) {
+ if (!atomic_read(&rth->dst.__refcnt)) {
u32 score = rt_score(rth);
if (score <= min_score) {
@@ -1171,7 +1170,7 @@ restart:
chain_length++;
- rthp = &rth->u.dst.rt_next;
+ rthp = &rth->dst.rt_next;
}
if (cand) {
@@ -1182,17 +1181,17 @@ restart:
* only 2 entries per bucket. We will see.
*/
if (chain_length > ip_rt_gc_elasticity) {
- *candp = cand->u.dst.rt_next;
+ *candp = cand->dst.rt_next;
rt_free(cand);
}
} else {
if (chain_length > rt_chain_length_max &&
slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
- struct net *net = dev_net(rt->u.dst.dev);
+ struct net *net = dev_net(rt->dst.dev);
int num = ++net->ipv4.current_rt_cache_rebuild_count;
if (!rt_caching(net)) {
printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
- rt->u.dst.dev->name, num);
+ rt->dst.dev->name, num);
}
rt_emergency_hash_rebuild(net);
spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1207,7 +1206,7 @@ restart:
route or unicast forwarding path.
*/
if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
- int err = arp_bind_neighbour(&rt->u.dst);
+ int err = arp_bind_neighbour(&rt->dst);
if (err) {
spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1238,14 +1237,14 @@ restart:
}
}
- rt->u.dst.rt_next = rt_hash_table[hash].chain;
+ rt->dst.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
- if (rt->u.dst.rt_next) {
+ if (rt->dst.rt_next) {
struct rtable *trt;
printk(KERN_DEBUG "rt_cache @%02x: %pI4",
hash, &rt->rt_dst);
- for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+ for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
printk(" . %pI4", &trt->rt_dst);
printk("\n");
}
@@ -1263,7 +1262,7 @@ skip_hashing:
if (rp)
*rp = rt;
else
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
return 0;
}
@@ -1335,20 +1334,21 @@ static void rt_del(unsigned hash, struct rtable *rt)
ip_rt_put(rt);
while ((aux = *rthp) != NULL) {
if (aux == rt || rt_is_expired(aux)) {
- *rthp = aux->u.dst.rt_next;
+ *rthp = aux->dst.rt_next;
rt_free(aux);
continue;
}
- rthp = &aux->u.dst.rt_next;
+ rthp = &aux->dst.rt_next;
}
spin_unlock_bh(rt_hash_lock_addr(hash));
}
+/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
{
int i, k;
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
struct rtable *rth, **rthp;
__be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
@@ -1384,7 +1384,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rthp=&rt_hash_table[hash].chain;
- rcu_read_lock();
while ((rth = rcu_dereference(*rthp)) != NULL) {
struct rtable *rt;
@@ -1393,44 +1392,42 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rth->fl.oif != ikeys[k] ||
rth->fl.iif != 0 ||
rt_is_expired(rth) ||
- !net_eq(dev_net(rth->u.dst.dev), net)) {
- rthp = &rth->u.dst.rt_next;
+ !net_eq(dev_net(rth->dst.dev), net)) {
+ rthp = &rth->dst.rt_next;
continue;
}
if (rth->rt_dst != daddr ||
rth->rt_src != saddr ||
- rth->u.dst.error ||
+ rth->dst.error ||
rth->rt_gateway != old_gw ||
- rth->u.dst.dev != dev)
+ rth->dst.dev != dev)
break;
- dst_hold(&rth->u.dst);
- rcu_read_unlock();
+ dst_hold(&rth->dst);
rt = dst_alloc(&ipv4_dst_ops);
if (rt == NULL) {
ip_rt_put(rth);
- in_dev_put(in_dev);
return;
}
/* Copy all the information. */
*rt = *rth;
- rt->u.dst.__use = 1;
- atomic_set(&rt->u.dst.__refcnt, 1);
- rt->u.dst.child = NULL;
- if (rt->u.dst.dev)
- dev_hold(rt->u.dst.dev);
+ rt->dst.__use = 1;
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.child = NULL;
+ if (rt->dst.dev)
+ dev_hold(rt->dst.dev);
if (rt->idev)
in_dev_hold(rt->idev);
- rt->u.dst.obsolete = -1;
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.path = &rt->u.dst;
- rt->u.dst.neighbour = NULL;
- rt->u.dst.hh = NULL;
+ rt->dst.obsolete = -1;
+ rt->dst.lastuse = jiffies;
+ rt->dst.path = &rt->dst;
+ rt->dst.neighbour = NULL;
+ rt->dst.hh = NULL;
#ifdef CONFIG_XFRM
- rt->u.dst.xfrm = NULL;
+ rt->dst.xfrm = NULL;
#endif
rt->rt_genid = rt_genid(net);
rt->rt_flags |= RTCF_REDIRECTED;
@@ -1439,23 +1436,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt->rt_gateway = new_gw;
/* Redirect received -> path was valid */
- dst_confirm(&rth->u.dst);
+ dst_confirm(&rth->dst);
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
- if (arp_bind_neighbour(&rt->u.dst) ||
- !(rt->u.dst.neighbour->nud_state &
+ if (arp_bind_neighbour(&rt->dst) ||
+ !(rt->dst.neighbour->nud_state &
NUD_VALID)) {
- if (rt->u.dst.neighbour)
- neigh_event_send(rt->u.dst.neighbour, NULL);
+ if (rt->dst.neighbour)
+ neigh_event_send(rt->dst.neighbour, NULL);
ip_rt_put(rth);
rt_drop(rt);
goto do_next;
}
- netevent.old = &rth->u.dst;
- netevent.new = &rt->u.dst;
+ netevent.old = &rth->dst;
+ netevent.new = &rt->dst;
call_netevent_notifiers(NETEVENT_REDIRECT,
&netevent);
@@ -1464,12 +1461,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
ip_rt_put(rt);
goto do_next;
}
- rcu_read_unlock();
do_next:
;
}
}
- in_dev_put(in_dev);
return;
reject_redirect:
@@ -1480,7 +1475,7 @@ reject_redirect:
&old_gw, dev->name, &new_gw,
&saddr, &daddr);
#endif
- in_dev_put(in_dev);
+ ;
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1493,8 +1488,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
ip_rt_put(rt);
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- (rt->u.dst.expires &&
- time_after_eq(jiffies, rt->u.dst.expires))) {
+ (rt->dst.expires &&
+ time_after_eq(jiffies, rt->dst.expires))) {
unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
rt->fl.oif,
rt_genid(dev_net(dst->dev)));
@@ -1532,7 +1527,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
int log_martians;
rcu_read_lock();
- in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
rcu_read_unlock();
return;
@@ -1543,30 +1538,30 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
- rt->u.dst.rate_tokens = 0;
+ if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
+ rt->dst.rate_tokens = 0;
/* Too many ignored redirects; do not send anything
- * set u.dst.rate_last to the last seen redirected packet.
+ * set dst.rate_last to the last seen redirected packet.
*/
- if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
- rt->u.dst.rate_last = jiffies;
+ if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
+ rt->dst.rate_last = jiffies;
return;
}
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
- if (rt->u.dst.rate_tokens == 0 ||
+ if (rt->dst.rate_tokens == 0 ||
time_after(jiffies,
- (rt->u.dst.rate_last +
- (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
+ (rt->dst.rate_last +
+ (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
- rt->u.dst.rate_last = jiffies;
- ++rt->u.dst.rate_tokens;
+ rt->dst.rate_last = jiffies;
+ ++rt->dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- rt->u.dst.rate_tokens == ip_rt_redirect_number &&
+ rt->dst.rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
&rt->rt_src, rt->rt_iif,
@@ -1581,7 +1576,7 @@ static int ip_error(struct sk_buff *skb)
unsigned long now;
int code;
- switch (rt->u.dst.error) {
+ switch (rt->dst.error) {
case EINVAL:
default:
goto out;
@@ -1590,7 +1585,7 @@ static int ip_error(struct sk_buff *skb)
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
- IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
+ IP_INC_STATS_BH(dev_net(rt->dst.dev),
IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
@@ -1599,12 +1594,12 @@ static int ip_error(struct sk_buff *skb)
}
now = jiffies;
- rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
- if (rt->u.dst.rate_tokens > ip_rt_error_burst)
- rt->u.dst.rate_tokens = ip_rt_error_burst;
- rt->u.dst.rate_last = now;
- if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
- rt->u.dst.rate_tokens -= ip_rt_error_cost;
+ rt->dst.rate_tokens += now - rt->dst.rate_last;
+ if (rt->dst.rate_tokens > ip_rt_error_burst)
+ rt->dst.rate_tokens = ip_rt_error_burst;
+ rt->dst.rate_last = now;
+ if (rt->dst.rate_tokens >= ip_rt_error_cost) {
+ rt->dst.rate_tokens -= ip_rt_error_cost;
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
}
@@ -1649,7 +1644,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.dst.rt_next)) {
+ rth = rcu_dereference(rth->dst.rt_next)) {
unsigned short mtu = new_mtu;
if (rth->fl.fl4_dst != daddr ||
@@ -1658,8 +1653,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
rth->rt_src != iph->saddr ||
rth->fl.oif != ikeys[k] ||
rth->fl.iif != 0 ||
- dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
- !net_eq(dev_net(rth->u.dst.dev), net) ||
+ dst_metric_locked(&rth->dst, RTAX_MTU) ||
+ !net_eq(dev_net(rth->dst.dev), net) ||
rt_is_expired(rth))
continue;
@@ -1667,22 +1662,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
/* BSD 4.2 compatibility hack :-( */
if (mtu == 0 &&
- old_mtu >= dst_mtu(&rth->u.dst) &&
+ old_mtu >= dst_mtu(&rth->dst) &&
old_mtu >= 68 + (iph->ihl << 2))
old_mtu -= iph->ihl << 2;
mtu = guess_mtu(old_mtu);
}
- if (mtu <= dst_mtu(&rth->u.dst)) {
- if (mtu < dst_mtu(&rth->u.dst)) {
- dst_confirm(&rth->u.dst);
+ if (mtu <= dst_mtu(&rth->dst)) {
+ if (mtu < dst_mtu(&rth->dst)) {
+ dst_confirm(&rth->dst);
if (mtu < ip_rt_min_pmtu) {
mtu = ip_rt_min_pmtu;
- rth->u.dst.metrics[RTAX_LOCK-1] |=
+ rth->dst.metrics[RTAX_LOCK-1] |=
(1 << RTAX_MTU);
}
- rth->u.dst.metrics[RTAX_MTU-1] = mtu;
- dst_set_expires(&rth->u.dst,
+ rth->dst.metrics[RTAX_MTU-1] = mtu;
+ dst_set_expires(&rth->dst,
ip_rt_mtu_expires);
}
est_mtu = mtu;
@@ -1755,7 +1750,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
rt = skb_rtable(skb);
if (rt)
- dst_set_expires(&rt->u.dst, 0);
+ dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1783,11 +1778,11 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
if (rt->fl.iif == 0)
src = rt->rt_src;
- else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
+ else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
src = FIB_RES_PREFSRC(res);
fib_res_put(&res);
} else
- src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
+ src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
RT_SCOPE_UNIVERSE);
memcpy(addr, &src, 4);
}
@@ -1795,10 +1790,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
- if (!(rt->u.dst.tclassid & 0xFFFF))
- rt->u.dst.tclassid |= tag & 0xFFFF;
- if (!(rt->u.dst.tclassid & 0xFFFF0000))
- rt->u.dst.tclassid |= tag & 0xFFFF0000;
+ if (!(rt->dst.tclassid & 0xFFFF))
+ rt->dst.tclassid |= tag & 0xFFFF;
+ if (!(rt->dst.tclassid & 0xFFFF0000))
+ rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
@@ -1810,30 +1805,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
- memcpy(rt->u.dst.metrics, fi->fib_metrics,
- sizeof(rt->u.dst.metrics));
+ memcpy(rt->dst.metrics, fi->fib_metrics,
+ sizeof(rt->dst.metrics));
if (fi->fib_mtu == 0) {
- rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
- if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
+ rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+ if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
rt->rt_gateway != rt->rt_dst &&
- rt->u.dst.dev->mtu > 576)
- rt->u.dst.metrics[RTAX_MTU-1] = 576;
+ rt->dst.dev->mtu > 576)
+ rt->dst.metrics[RTAX_MTU-1] = 576;
}
#ifdef CONFIG_NET_CLS_ROUTE
- rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+ rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
} else
- rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
-
- if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
- rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
- if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
- rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
- if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
- rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
+ rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
+
+ if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+ rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
+ if (dst_mtu(&rt->dst) > IP_MAX_MTU)
+ rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
+ if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
+ rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
ip_rt_min_advmss);
- if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
- rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+ if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
+ rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1844,14 +1839,16 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
rt->rt_type = res->type;
}
+/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
- unsigned hash;
+ unsigned int hash;
struct rtable *rth;
__be32 spec_dst;
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
u32 itag = 0;
+ int err;
/* Primary sanity checks. */
@@ -1866,21 +1863,23 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (!ipv4_is_local_multicast(daddr))
goto e_inval;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
- } else if (fib_validate_source(saddr, 0, tos, 0,
- dev, &spec_dst, &itag, 0) < 0)
- goto e_inval;
-
+ } else {
+ err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
+ &itag, 0);
+ if (err < 0)
+ goto e_err;
+ }
rth = dst_alloc(&ipv4_dst_ops);
if (!rth)
goto e_nobufs;
- rth->u.dst.output = ip_rt_bug;
- rth->u.dst.obsolete = -1;
+ rth->dst.output = ip_rt_bug;
+ rth->dst.obsolete = -1;
- atomic_set(&rth->u.dst.__refcnt, 1);
- rth->u.dst.flags= DST_HOST;
+ atomic_set(&rth->dst.__refcnt, 1);
+ rth->dst.flags= DST_HOST;
if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->u.dst.flags |= DST_NOPOLICY;
+ rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
@@ -1888,13 +1887,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
- rth->u.dst.tclassid = itag;
+ rth->dst.tclassid = itag;
#endif
rth->rt_iif =
rth->fl.iif = dev->ifindex;
- rth->u.dst.dev = init_net.loopback_dev;
- dev_hold(rth->u.dst.dev);
- rth->idev = in_dev_get(rth->u.dst.dev);
+ rth->dst.dev = init_net.loopback_dev;
+ dev_hold(rth->dst.dev);
+ rth->idev = in_dev_get(rth->dst.dev);
rth->fl.oif = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
@@ -1902,27 +1901,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
if (our) {
- rth->u.dst.input= ip_local_deliver;
+ rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
}
#ifdef CONFIG_IP_MROUTE
if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
- rth->u.dst.input = ip_mr_input;
+ rth->dst.input = ip_mr_input;
#endif
RT_CACHE_STAT_INC(in_slow_mc);
- in_dev_put(in_dev);
hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
e_nobufs:
- in_dev_put(in_dev);
return -ENOBUFS;
-
e_inval:
- in_dev_put(in_dev);
return -EINVAL;
+e_err:
+ return err;
}
@@ -1956,22 +1953,22 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}
+/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
struct rtable **result)
{
-
struct rtable *rth;
int err;
struct in_device *out_dev;
- unsigned flags = 0;
+ unsigned int flags = 0;
__be32 spec_dst;
u32 itag;
/* get a working reference to the output device */
- out_dev = in_dev_get(FIB_RES_DEV(*res));
+ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
if (net_ratelimit())
printk(KERN_CRIT "Bug in ip_route_input" \
@@ -1986,7 +1983,6 @@ static int __mkroute_input(struct sk_buff *skb,
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
- err = -EINVAL;
goto cleanup;
}
@@ -2020,12 +2016,12 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- atomic_set(&rth->u.dst.__refcnt, 1);
- rth->u.dst.flags= DST_HOST;
+ atomic_set(&rth->dst.__refcnt, 1);
+ rth->dst.flags= DST_HOST;
if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->u.dst.flags |= DST_NOPOLICY;
+ rth->dst.flags |= DST_NOPOLICY;
if (IN_DEV_CONF_GET(out_dev, NOXFRM))
- rth->u.dst.flags |= DST_NOXFRM;
+ rth->dst.flags |= DST_NOXFRM;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
@@ -2035,16 +2031,16 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_gateway = daddr;
rth->rt_iif =
rth->fl.iif = in_dev->dev->ifindex;
- rth->u.dst.dev = (out_dev)->dev;
- dev_hold(rth->u.dst.dev);
- rth->idev = in_dev_get(rth->u.dst.dev);
+ rth->dst.dev = (out_dev)->dev;
+ dev_hold(rth->dst.dev);
+ rth->idev = in_dev_get(rth->dst.dev);
rth->fl.oif = 0;
rth->rt_spec_dst= spec_dst;
- rth->u.dst.obsolete = -1;
- rth->u.dst.input = ip_forward;
- rth->u.dst.output = ip_output;
- rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
+ rth->dst.obsolete = -1;
+ rth->dst.input = ip_forward;
+ rth->dst.output = ip_output;
+ rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
rt_set_nexthop(rth, res, itag);
@@ -2053,8 +2049,6 @@ static int __mkroute_input(struct sk_buff *skb,
*result = rth;
err = 0;
cleanup:
- /* release the working reference to the output device */
- in_dev_put(out_dev);
return err;
}
@@ -2080,7 +2074,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
/* put it into the cache */
hash = rt_hash(daddr, saddr, fl->iif,
- rt_genid(dev_net(rth->u.dst.dev)));
+ rt_genid(dev_net(rth->dst.dev)));
return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
}
@@ -2098,7 +2092,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct fib_result res;
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = daddr,
.saddr = saddr,
@@ -2158,13 +2152,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto brd_input;
if (res.type == RTN_LOCAL) {
- int result;
- result = fib_validate_source(saddr, daddr, tos,
+ err = fib_validate_source(saddr, daddr, tos,
net->loopback_dev->ifindex,
dev, &spec_dst, &itag, skb->mark);
- if (result < 0)
- goto martian_source;
- if (result)
+ if (err < 0)
+ goto martian_source_keep_err;
+ if (err)
flags |= RTCF_DIRECTSRC;
spec_dst = daddr;
goto local_input;
@@ -2177,7 +2170,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
done:
- in_dev_put(in_dev);
if (free_res)
fib_res_put(&res);
out: return err;
@@ -2192,7 +2184,7 @@ brd_input:
err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
&itag, skb->mark);
if (err < 0)
- goto martian_source;
+ goto martian_source_keep_err;
if (err)
flags |= RTCF_DIRECTSRC;
}
@@ -2205,14 +2197,14 @@ local_input:
if (!rth)
goto e_nobufs;
- rth->u.dst.output= ip_rt_bug;
- rth->u.dst.obsolete = -1;
+ rth->dst.output= ip_rt_bug;
+ rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(net);
- atomic_set(&rth->u.dst.__refcnt, 1);
- rth->u.dst.flags= DST_HOST;
+ atomic_set(&rth->dst.__refcnt, 1);
+ rth->dst.flags= DST_HOST;
if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->u.dst.flags |= DST_NOPOLICY;
+ rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
@@ -2220,20 +2212,20 @@ local_input:
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
- rth->u.dst.tclassid = itag;
+ rth->dst.tclassid = itag;
#endif
rth->rt_iif =
rth->fl.iif = dev->ifindex;
- rth->u.dst.dev = net->loopback_dev;
- dev_hold(rth->u.dst.dev);
- rth->idev = in_dev_get(rth->u.dst.dev);
+ rth->dst.dev = net->loopback_dev;
+ dev_hold(rth->dst.dev);
+ rth->idev = in_dev_get(rth->dst.dev);
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
- rth->u.dst.input= ip_local_deliver;
+ rth->dst.input= ip_local_deliver;
rth->rt_flags = flags|RTCF_LOCAL;
if (res.type == RTN_UNREACHABLE) {
- rth->u.dst.input= ip_error;
- rth->u.dst.error= -err;
+ rth->dst.input= ip_error;
+ rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
@@ -2273,8 +2265,10 @@ e_nobufs:
goto done;
martian_source:
+ err = -EINVAL;
+martian_source_keep_err:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
- goto e_inval;
+ goto done;
}
int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2284,32 +2278,34 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
unsigned hash;
int iif = dev->ifindex;
struct net *net;
+ int res;
net = dev_net(dev);
+ rcu_read_lock();
+
if (!rt_caching(net))
goto skip_cache;
tos &= IPTOS_RT_MASK;
hash = rt_hash(daddr, saddr, iif, rt_genid(net));
- rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.dst.rt_next)) {
+ rth = rcu_dereference(rth->dst.rt_next)) {
if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
(rth->fl.iif ^ iif) |
rth->fl.oif |
(rth->fl.fl4_tos ^ tos)) == 0 &&
rth->fl.mark == skb->mark &&
- net_eq(dev_net(rth->u.dst.dev), net) &&
+ net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
if (noref) {
- dst_use_noref(&rth->u.dst, jiffies);
- skb_dst_set_noref(skb, &rth->u.dst);
+ dst_use_noref(&rth->dst, jiffies);
+ skb_dst_set_noref(skb, &rth->dst);
} else {
- dst_use(&rth->u.dst, jiffies);
- skb_dst_set(skb, &rth->u.dst);
+ dst_use(&rth->dst, jiffies);
+ skb_dst_set(skb, &rth->dst);
}
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
@@ -2317,7 +2313,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}
RT_CACHE_STAT_INC(in_hlist_search);
}
- rcu_read_unlock();
skip_cache:
/* Multicast recognition logic is moved from route cache to here.
@@ -2332,12 +2327,11 @@ skip_cache:
route cache entry is created eventually.
*/
if (ipv4_is_multicast(daddr)) {
- struct in_device *in_dev;
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
- rcu_read_lock();
- if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
+ if (in_dev) {
int our = ip_check_mc(in_dev, daddr, saddr,
- ip_hdr(skb)->protocol);
+ ip_hdr(skb)->protocol);
if (our
#ifdef CONFIG_IP_MROUTE
||
@@ -2345,15 +2339,18 @@ skip_cache:
IN_DEV_MFORWARD(in_dev))
#endif
) {
+ int res = ip_route_input_mc(skb, daddr, saddr,
+ tos, dev, our);
rcu_read_unlock();
- return ip_route_input_mc(skb, daddr, saddr,
- tos, dev, our);
+ return res;
}
}
rcu_read_unlock();
return -EINVAL;
}
- return ip_route_input_slow(skb, daddr, saddr, tos, dev);
+ res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
+ rcu_read_unlock();
+ return res;
}
EXPORT_SYMBOL(ip_route_input_common);
@@ -2415,12 +2412,12 @@ static int __mkroute_output(struct rtable **result,
goto cleanup;
}
- atomic_set(&rth->u.dst.__refcnt, 1);
- rth->u.dst.flags= DST_HOST;
+ atomic_set(&rth->dst.__refcnt, 1);
+ rth->dst.flags= DST_HOST;
if (IN_DEV_CONF_GET(in_dev, NOXFRM))
- rth->u.dst.flags |= DST_NOXFRM;
+ rth->dst.flags |= DST_NOXFRM;
if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->u.dst.flags |= DST_NOPOLICY;
+ rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = oldflp->fl4_dst;
rth->fl.fl4_tos = tos;
@@ -2432,35 +2429,35 @@ static int __mkroute_output(struct rtable **result,
rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
/* get references to the devices that are to be hold by the routing
cache entry */
- rth->u.dst.dev = dev_out;
+ rth->dst.dev = dev_out;
dev_hold(dev_out);
rth->idev = in_dev_get(dev_out);
rth->rt_gateway = fl->fl4_dst;
rth->rt_spec_dst= fl->fl4_src;
- rth->u.dst.output=ip_output;
- rth->u.dst.obsolete = -1;
+ rth->dst.output=ip_output;
+ rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(dev_net(dev_out));
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL) {
- rth->u.dst.input = ip_local_deliver;
+ rth->dst.input = ip_local_deliver;
rth->rt_spec_dst = fl->fl4_dst;
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
rth->rt_spec_dst = fl->fl4_src;
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
- rth->u.dst.output = ip_mc_output;
+ rth->dst.output = ip_mc_output;
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
if (res->type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(oldflp->fl4_dst)) {
- rth->u.dst.input = ip_mr_input;
- rth->u.dst.output = ip_mc_output;
+ rth->dst.input = ip_mr_input;
+ rth->dst.output = ip_mc_output;
}
}
#endif
@@ -2715,7 +2712,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rcu_read_lock_bh();
for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
+ rth = rcu_dereference_bh(rth->dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
@@ -2723,9 +2720,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK)) &&
- net_eq(dev_net(rth->u.dst.dev), net) &&
+ net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
- dst_use(&rth->u.dst, jiffies);
+ dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
*rp = rth;
@@ -2762,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
dst_alloc(&ipv4_dst_blackhole_ops);
if (rt) {
- struct dst_entry *new = &rt->u.dst;
+ struct dst_entry *new = &rt->dst;
atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
+ memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
- new->dev = ort->u.dst.dev;
+ new->dev = ort->dst.dev;
if (new->dev)
dev_hold(new->dev);
@@ -2794,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
dst_free(new);
}
- dst_release(&(*rp)->u.dst);
+ dst_release(&(*rp)->dst);
*rp = rt;
return (rt ? 0 : -ENOMEM);
}
@@ -2864,11 +2861,11 @@ static int rt_fill_info(struct net *net,
r->rtm_src_len = 32;
NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
}
- if (rt->u.dst.dev)
- NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
+ if (rt->dst.dev)
+ NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
- if (rt->u.dst.tclassid)
- NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
+ if (rt->dst.tclassid)
+ NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
if (rt->fl.iif)
NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
@@ -2878,12 +2875,13 @@ static int rt_fill_info(struct net *net,
if (rt->rt_dst != rt->rt_gateway)
NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
- if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+ if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
goto nla_put_failure;
- error = rt->u.dst.error;
- expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+ error = rt->dst.error;
+ expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
if (rt->peer) {
+ inet_peer_refcheck(rt->peer);
id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
if (rt->peer->tcp_ts_stamp) {
ts = rt->peer->tcp_ts;
@@ -2914,7 +2912,7 @@ static int rt_fill_info(struct net *net,
NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
}
- if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+ if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
expires, error) < 0)
goto nla_put_failure;
@@ -2979,8 +2977,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
local_bh_enable();
rt = skb_rtable(skb);
- if (err == 0 && rt->u.dst.error)
- err = -rt->u.dst.error;
+ if (err == 0 && rt->dst.error)
+ err = -rt->dst.error;
} else {
struct flowi fl = {
.nl_u = {
@@ -2998,7 +2996,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
if (err)
goto errout_free;
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
@@ -3034,12 +3032,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
rcu_read_lock_bh();
for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
- if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
+ rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
+ if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
continue;
if (rt_is_expired(rt))
continue;
- skb_dst_set_noref(skb, &rt->u.dst);
+ skb_dst_set_noref(skb, &rt->dst);
if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
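Nearly every route.c hunk above is the mechanical rt->u.dst -> rt->dst rename; the rest convert the input-path helpers to rely on the single rcu_read_lock() now taken in ip_route_input_common(), which is why in_dev_get()/in_dev_put() pairs become __in_dev_get_rcu() with no matching put. For the rename itself, a hedged sketch of what it implies for the structure (fields are illustrative, not the real rtable or dst_entry definitions): the dst_entry is embedded directly as the leading member rather than wrapped in a single-member union, so accessors get shorter while the layout stays the same.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins; the real dst_entry and rtable carry many more fields. */
struct dst_entry_model {
    int refcnt;
    unsigned long expires;
};

struct rtable_before {
    union {
        struct dst_entry_model dst;     /* accessed as rt->u.dst.<field> */
    } u;
    unsigned int rt_flags;
};

struct rtable_after {
    struct dst_entry_model dst;         /* accessed as rt->dst.<field> */
    unsigned int rt_flags;
};

int main(void)
{
    /* The single-member union added nothing: sizes and offsets match. */
    assert(sizeof(struct rtable_before) == sizeof(struct rtable_after));
    printf("layout unchanged: %zu bytes either way\n",
           sizeof(struct rtable_after));
    return 0;
}

Since the union had only one member, the rename is purely textual: no size, offset, or ABI change follows from it.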
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 9f6b22206c52..51b5662545d6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -138,23 +138,23 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
}
/*
- * This table has to be sorted and terminated with (__u16)-1.
- * XXX generate a better table.
- * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
+ * MSS Values are taken from the 2009 paper
+ * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
+ * - values 1440 to 1460 accounted for 80% of observed mss values
+ * - values outside the 536-1460 range are rare (<0.2%).
+ *
+ * Table must be sorted.
*/
static __u16 const msstab[] = {
- 64 - 1,
- 256 - 1,
- 512 - 1,
- 536 - 1,
- 1024 - 1,
- 1440 - 1,
- 1460 - 1,
- 4312 - 1,
- (__u16)-1
+ 64,
+ 512,
+ 536,
+ 1024,
+ 1440,
+ 1460,
+ 4312,
+ 8960,
};
-/* The number doesn't include the -1 terminator */
-#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
/*
* Generate a syncookie. mssp points to the mss, which is returned
@@ -169,10 +169,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
tcp_synq_overflow(sk);
- /* XXX sort msstab[] by probability? Binary search? */
- for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
- ;
- *mssp = msstab[mssind] + 1;
+ for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
+ if (mss >= msstab[mssind])
+ break;
+ *mssp = msstab[mssind];
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
@@ -202,7 +202,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
jiffies / (HZ * 60),
COUNTER_TRIES);
- return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
+ return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
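Taken together, the syncookies.c hunks above change the MSS encoding: msstab[] now holds the raw MSS values, sorted ascending with no terminator, the encoder walks it from the top and keeps the index of the largest entry not exceeding the peer's MSS, and the decoder only has to bounds-check the index. A standalone sketch of that round trip (encode_mss and decode_mss are illustrative helper names; the table values are copied from the patch):

#include <stdio.h>

static const unsigned short msstab[] = {
    64, 512, 536, 1024, 1440, 1460, 4312, 8960,
};
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int encode_mss(unsigned short mss)
{
    unsigned int mssind;

    for (mssind = ARRAY_SIZE(msstab) - 1; mssind; mssind--)
        if (mss >= msstab[mssind])
            break;
    return mssind;              /* falls back to index 0 for tiny MSS values */
}

static unsigned short decode_mss(unsigned int mssind)
{
    return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}

int main(void)
{
    unsigned short samples[] = { 40, 536, 1400, 1460, 9000 };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(samples); i++) {
        unsigned int idx = encode_mss(samples[i]);

        printf("mss %u -> index %u -> advertised %u\n",
               (unsigned int)samples[i], idx, (unsigned int)decode_mss(idx));
    }
    return 0;
}

Running it shows, for instance, that a peer MSS of 1400 is clamped down to 1024, the next table entry below it.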
@@ -230,23 +230,36 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
* The lowest 4 bits are for snd_wscale
* The next 4 lsb are for rcv_wscale
* The next lsb is for sack_ok
+ *
+ * Return false if we decode an option that should not be present.
*/
-void cookie_check_timestamp(struct tcp_options_received *tcp_opt)
+bool cookie_check_timestamp(struct tcp_options_received *tcp_opt)
{
/* echoed timestamp, 9 lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr & TSMASK;
+ if (!tcp_opt->saw_tstamp) {
+ tcp_clear_options(tcp_opt);
+ return true;
+ }
+
+ if (!sysctl_tcp_timestamps)
+ return false;
+
tcp_opt->snd_wscale = options & 0xf;
options >>= 4;
tcp_opt->rcv_wscale = options & 0xf;
tcp_opt->sack_ok = (options >> 4) & 0x1;
- if (tcp_opt->sack_ok)
- tcp_sack_reset(tcp_opt);
+ if (tcp_opt->sack_ok && !sysctl_tcp_sack)
+ return false;
- if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale)
+ if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale) {
tcp_opt->wscale_ok = 1;
+ return sysctl_tcp_window_scaling != 0;
+ }
+ return true;
}
EXPORT_SYMBOL(cookie_check_timestamp);
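cookie_check_timestamp() keeps the bit layout described in the comment above -- the 9 low bits of the echoed timestamp hold 4 bits of snd_wscale, 4 bits of rcv_wscale and a sack_ok bit -- but now returns false whenever a decoded option is disabled by sysctl, so the caller drops the ACK rather than accepting options it never offered. A small decoder for just the bit layout; TSMASK is assumed here to be a 9-bit mask for illustration:

#include <stdint.h>
#include <stdio.h>

#define TSMASK 0x1ff    /* assumed 9-bit option mask, illustration only */

int main(void)
{
    uint32_t rcv_tsecr = 0x12345000u | 0x1a7u;  /* example echoed timestamp */
    uint32_t options = rcv_tsecr & TSMASK;

    unsigned int snd_wscale = options & 0xf;
    unsigned int rcv_wscale = (options >> 4) & 0xf;
    unsigned int sack_ok = (options >> 8) & 0x1;

    printf("snd_wscale=%u rcv_wscale=%u sack_ok=%u\n",
           snd_wscale, rcv_wscale, sack_ok);
    return 0;
}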
@@ -266,7 +279,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct rtable *rt;
__u8 rcv_wscale;
- if (!sysctl_tcp_syncookies || !th->ack)
+ if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
@@ -281,8 +294,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
+ if (!cookie_check_timestamp(&tcp_opt))
+ goto out;
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
@@ -354,15 +367,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
}
/* Try to redo what tcp_v4_send_synack did. */
- req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
+ req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
ireq->wscale_ok, &rcv_wscale,
- dst_metric(&rt->u.dst, RTAX_INITRWND));
+ dst_metric(&rt->dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
- ret = get_cookie_sock(sk, skb, req, &rt->u.dst);
+ ret = get_cookie_sock(sk, skb, req, &rt->dst);
out: return ret;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..779d40c3b96e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -511,7 +511,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
@@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
- tcb->flags = TCPCB_FLAG_ACK;
+ tcb->flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
@@ -815,7 +815,7 @@ new_segment:
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
@@ -1061,7 +1061,7 @@ new_segment:
}
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
@@ -2999,6 +2999,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const unsigned head_data_len = skb_headlen(skb) > header_len ?
skb_headlen(skb) - header_len : 0;
const struct skb_shared_info *shi = skb_shinfo(skb);
+ struct sk_buff *frag_iter;
sg_init_table(&sg, 1);
@@ -3013,6 +3014,10 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
return 1;
}
+ skb_walk_frags(skb, frag_iter)
+ if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
+ return 1;
+
return 0;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 548d575e6cc6..04334661fa28 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3286,7 +3286,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
- if (!(scb->flags & TCPCB_FLAG_SYN)) {
+ if (!(scb->flags & TCPHDR_SYN)) {
flag |= FLAG_DATA_ACKED;
} else {
flag |= FLAG_SYN_ACKED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fe193e53af44..2e41e6f92968 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,10 +204,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* TIME-WAIT * and initialize rx_opt.ts_recent from it,
* when trying new connection.
*/
- if (peer != NULL &&
- (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
+ if (peer) {
+ inet_peer_refcheck(peer);
+ if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+ tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+ tp->rx_opt.ts_recent = peer->tcp_ts;
+ }
}
}
@@ -237,7 +239,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
if (!tp->write_seq)
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
@@ -793,19 +795,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
kfree(inet_rsk(req)->opt);
}
-#ifdef CONFIG_SYN_COOKIES
-static void syn_flood_warning(struct sk_buff *skb)
+static void syn_flood_warning(const struct sk_buff *skb)
{
- static unsigned long warntime;
+ const char *msg;
- if (time_after(jiffies, (warntime + HZ * 60))) {
- warntime = jiffies;
- printk(KERN_INFO
- "possible SYN flooding on port %d. Sending cookies.\n",
- ntohs(tcp_hdr(skb)->dest));
- }
-}
+#ifdef CONFIG_SYN_COOKIES
+ if (sysctl_tcp_syncookies)
+ msg = "Sending cookies";
+ else
#endif
+ msg = "Dropping request";
+
+ pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
+ ntohs(tcp_hdr(skb)->dest), msg);
+}
/*
* Save and compile IPv4 options into the request_sock if needed.
@@ -1243,6 +1246,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* evidently real one.
*/
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if (net_ratelimit())
+ syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) {
want_cookie = 1;
@@ -1328,7 +1333,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
- syn_flood_warning(skb);
req->cookie_ts = tmp_opt.tstamp_ok;
#endif
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1349,6 +1353,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
+ inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
@@ -1504,7 +1509,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
}
#ifdef CONFIG_SYN_COOKIES
- if (!th->rst && !th->syn && th->ack)
+ if (!th->syn)
sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
return sk;
@@ -1978,6 +1983,11 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
+/*
+ * Get the next listener socket following cur. If cur is NULL, get the first socket
+ * starting from bucket given in st->bucket; when st->bucket is zero the
+ * very first socket in the hash table is returned.
+ */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
struct inet_connection_sock *icsk;
@@ -1988,14 +1998,15 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct net *net = seq_file_net(seq);
if (!sk) {
- st->bucket = 0;
- ilb = &tcp_hashinfo.listening_hash[0];
+ ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
sk = sk_nulls_head(&ilb->head);
+ st->offset = 0;
goto get_sk;
}
ilb = &tcp_hashinfo.listening_hash[st->bucket];
++st->num;
+ ++st->offset;
if (st->state == TCP_SEQ_STATE_OPENREQ) {
struct request_sock *req = cur;
@@ -2010,6 +2021,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
}
req = req->dl_next;
}
+ st->offset = 0;
if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
break;
get_req:
@@ -2045,6 +2057,7 @@ start_req:
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
}
spin_unlock_bh(&ilb->lock);
+ st->offset = 0;
if (++st->bucket < INET_LHTABLE_SIZE) {
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
@@ -2058,7 +2071,12 @@ out:
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
- void *rc = listening_get_next(seq, NULL);
+ struct tcp_iter_state *st = seq->private;
+ void *rc;
+
+ st->bucket = 0;
+ st->offset = 0;
+ rc = listening_get_next(seq, NULL);
while (rc && *pos) {
rc = listening_get_next(seq, rc);
@@ -2073,13 +2091,18 @@ static inline int empty_bucket(struct tcp_iter_state *st)
hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
+/*
+ * Get first established socket starting from bucket given in st->bucket.
+ * If st->bucket is zero, the very first socket in the hash is returned.
+ */
static void *established_get_first(struct seq_file *seq)
{
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
void *rc = NULL;
- for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
+ st->offset = 0;
+ for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
struct sock *sk;
struct hlist_nulls_node *node;
struct inet_timewait_sock *tw;
@@ -2124,6 +2147,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
struct net *net = seq_file_net(seq);
++st->num;
+ ++st->offset;
if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
tw = cur;
@@ -2140,6 +2164,7 @@ get_tw:
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Look for next non empty bucket */
+ st->offset = 0;
while (++st->bucket <= tcp_hashinfo.ehash_mask &&
empty_bucket(st))
;
@@ -2167,7 +2192,11 @@ out:
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
- void *rc = established_get_first(seq);
+ struct tcp_iter_state *st = seq->private;
+ void *rc;
+
+ st->bucket = 0;
+ rc = established_get_first(seq);
while (rc && pos) {
rc = established_get_next(seq, rc);
@@ -2192,24 +2221,72 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
return rc;
}
+static void *tcp_seek_last_pos(struct seq_file *seq)
+{
+ struct tcp_iter_state *st = seq->private;
+ int offset = st->offset;
+ int orig_num = st->num;
+ void *rc = NULL;
+
+ switch (st->state) {
+ case TCP_SEQ_STATE_OPENREQ:
+ case TCP_SEQ_STATE_LISTENING:
+ if (st->bucket >= INET_LHTABLE_SIZE)
+ break;
+ st->state = TCP_SEQ_STATE_LISTENING;
+ rc = listening_get_next(seq, NULL);
+ while (offset-- && rc)
+ rc = listening_get_next(seq, rc);
+ if (rc)
+ break;
+ st->bucket = 0;
+ /* Fallthrough */
+ case TCP_SEQ_STATE_ESTABLISHED:
+ case TCP_SEQ_STATE_TIME_WAIT:
+ st->state = TCP_SEQ_STATE_ESTABLISHED;
+ if (st->bucket > tcp_hashinfo.ehash_mask)
+ break;
+ rc = established_get_first(seq);
+ while (offset-- && rc)
+ rc = established_get_next(seq, rc);
+ }
+
+ st->num = orig_num;
+
+ return rc;
+}
+
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct tcp_iter_state *st = seq->private;
+ void *rc;
+
+ if (*pos && *pos == st->last_pos) {
+ rc = tcp_seek_last_pos(seq);
+ if (rc)
+ goto out;
+ }
+
st->state = TCP_SEQ_STATE_LISTENING;
st->num = 0;
- return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ st->bucket = 0;
+ st->offset = 0;
+ rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+out:
+ st->last_pos = *pos;
+ return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ struct tcp_iter_state *st = seq->private;
void *rc = NULL;
- struct tcp_iter_state *st;
if (v == SEQ_START_TOKEN) {
rc = tcp_get_idx(seq, 0);
goto out;
}
- st = seq->private;
switch (st->state) {
case TCP_SEQ_STATE_OPENREQ:
@@ -2217,6 +2294,8 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
rc = listening_get_next(seq, v);
if (!rc) {
st->state = TCP_SEQ_STATE_ESTABLISHED;
+ st->bucket = 0;
+ st->offset = 0;
rc = established_get_first(seq);
}
break;
@@ -2227,6 +2306,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
out:
++*pos;
+ st->last_pos = *pos;
return rc;
}
@@ -2265,6 +2345,7 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
+ s->last_pos = 0;
return 0;
}
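The tcp_ipv4.c /proc changes above make reads of /proc/net/tcp restartable: the iterator records the bucket, the in-bucket offset and the last position handed out, and tcp_seq_start() resumes through tcp_seek_last_pos() when the next read continues at *pos == st->last_pos instead of re-walking every earlier bucket. A toy userspace model of that resume idea (table contents and names are invented for illustration):

#include <stdio.h>

#define NBUCKETS     4
#define BUCKET_SLOTS 3

/* Toy hash table: -1 marks an empty slot. */
static const int buckets[NBUCKETS][BUCKET_SLOTS] = {
    { 1, 2, 3 },
    { 4, 5, -1 },
    { -1, -1, -1 },
    { 6, -1, -1 },
};

struct iter_state {
    int bucket;     /* like st->bucket */
    int offset;     /* like st->offset: position within the bucket */
    long num;       /* like st->num: global position */
};

/* Return the next item and advance bucket/offset, or -1 at the end. */
static int get_next(struct iter_state *st)
{
    while (st->bucket < NBUCKETS) {
        if (st->offset < BUCKET_SLOTS &&
            buckets[st->bucket][st->offset] != -1) {
            int v = buckets[st->bucket][st->offset];

            st->offset++;
            st->num++;
            return v;
        }
        st->bucket++;
        st->offset = 0;
    }
    return -1;
}

int main(void)
{
    struct iter_state st = { 0, 0, 0 };
    int v;

    /* First read() of the virtual file stops after two entries. */
    v = get_next(&st);
    printf("first read: %d\n", v);
    v = get_next(&st);
    printf("first read: %d\n", v);

    /* Second read() resumes at the saved bucket/offset; nothing before
     * st.bucket is walked again. */
    while ((v = get_next(&st)) != -1)
        printf("second read: %d\n", v);
    return 0;
}

In the real code the offset is replayed by skipping entries rather than used as a direct index, since bucket contents can change between reads; the sketch only shows why saving the bucket avoids the quadratic rescan.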
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..51d316dbb058 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -294,9 +294,9 @@ static u16 tcp_select_window(struct sock *sk)
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
- TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+ TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
@@ -306,7 +306,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
tp->ecn_flags = 0;
if (sysctl_tcp_ecn == 1) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
}
@@ -361,7 +361,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
skb_shinfo(skb)->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
- if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+ if (flags & (TCPHDR_SYN | TCPHDR_FIN))
seq++;
TCP_SKB_CB(skb)->end_seq = seq;
}
@@ -820,7 +820,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+ if (unlikely(tcb->flags & TCPHDR_SYN))
tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
else
tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->flags);
- if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+ if (unlikely(tcb->flags & TCPHDR_SYN)) {
/* RFC1323: The window in SYN & SYN/ACK segments
* is never scaled.
*/
@@ -866,7 +866,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
- if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+ if (likely((tcb->flags & TCPHDR_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
- if (likely(tcb->flags & TCPCB_FLAG_ACK))
+ if (likely(tcb->flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
@@ -1328,8 +1328,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
- tcp_skb_pcount(skb) == 1)
+ if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
return 1;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1487,7 +1486,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+ TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
TCP_SKB_CB(buff)->flags = flags;
/* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@ static int tcp_mtu_probe(struct sock *sk)
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
- TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+ TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
TCP_SKB_CB(nskb)->sacked = 0;
nskb->csum = 0;
nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@ static int tcp_mtu_probe(struct sock *sk)
sk_wmem_free_skb(sk, skb);
} else {
TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
- ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+ ~(TCPHDR_FIN|TCPHDR_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2020,7 +2019,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
if (!sysctl_tcp_retrans_collapse)
return;
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+ if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
return;
tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* since it is cheap to do so and saves bytes on the network.
*/
if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
/* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@ void tcp_send_fin(struct sock *sk)
mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
} else {
@@ -2318,7 +2317,7 @@ void tcp_send_fin(struct sock *sk)
skb_reserve(skb, MAX_TCP_HEADER);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
- TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+ TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
- TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+ TCPHDR_ACK | TCPHDR_RST);
/* Send it off. */
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+ if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
}
- if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+ if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
@@ -2381,7 +2380,7 @@ int tcp_send_synack(struct sock *sk)
skb = nskb;
}
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
TCP_ECN_send_synack(tcp_sk(sk), skb);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
* not even correctly set)
*/
tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
- TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+ TCPHDR_SYN | TCPHDR_ACK);
if (OPTION_COOKIE_EXTENSION & opts.options) {
if (s_data_desired) {
@@ -2592,7 +2591,7 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tp->snd_nxt = tp->write_seq;
- tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+ tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
TCP_ECN_send_syn(sk, buff);
/* Send it off. */
@@ -2698,7 +2697,7 @@ void tcp_send_ack(struct sock *sk)
/* Reserve space for headers and prepare control bits. */
skb_reserve(buff, MAX_TCP_HEADER);
- tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
* end to send an ack. Don't queue or clone SKB, just
* send it.
*/
- tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
TCP_SKB_CB(skb)->when = tcp_time_stamp;
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
@@ -2762,13 +2761,13 @@ int tcp_write_wakeup(struct sock *sk)
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
seg_size = min(seg_size, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
if (tcp_fragment(sk, skb, seg_size, mss))
return -1;
} else if (!tcp_skb_pcount(skb))
tcp_set_skb_tso_segs(sk, skb, mss);
- TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+ TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
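The net/ipv4/tcp_output.c hunks above are a mechanical rename of the per-skb control-block flag macros from TCPCB_FLAG_* to TCPHDR_*; none of the flag logic changes. A minimal stand-alone sketch of the same bit tests, with flag values assumed to follow the usual TCP header bit layout rather than taken from this diff:

/* Stand-alone sketch: the TCPHDR_* values below are assumed to mirror the
 * on-the-wire TCP flag bits; they are not copied from this patch. */
#include <stdio.h>
#include <stdint.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10

int main(void)
{
	uint8_t flags = TCPHDR_SYN | TCPHDR_ACK;	/* a SYN/ACK segment */

	/* Same tests the patch rewrites, minus the old TCPCB_FLAG_ prefix. */
	if (!(flags & TCPHDR_SYN))
		printf("not a SYN\n");
	if (flags & TCPHDR_ACK)
		printf("ACK set\n");

	/* "PSH and FIN should only be set in the second packet": mask them off. */
	flags &= ~(TCPHDR_FIN | TCPHDR_PSH);
	printf("flags after split: 0x%02x\n", flags);
	return 0;
}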
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eec4ff456e33..32e0bef60d0a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -914,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
- sk_dst_set(sk, dst_clone(&rt->u.dst));
+ sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
@@ -978,7 +978,7 @@ out:
return err;
do_confirm:
- dst_confirm(&rt->u.dst);
+ dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..349327092c9e 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -37,7 +37,7 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
fl.fl4_src = saddr->a4;
err = __ip_route_output_key(net, &rt, &fl);
- dst = &rt->u.dst;
+ dst = &rt->dst;
if (err)
dst = ERR_PTR(err);
return dst;
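The rt->u.dst to rt->dst conversions in the udp.c and xfrm4_policy.c hunks above, and throughout the IPv6 files below, track a layout change in the routing entries: the struct dst_entry is now embedded directly as the leading .dst member instead of sitting inside a union named u (see the template initializers in the net/ipv6/route.c hunks further down). A simplified stand-alone model of that layout, with placeholder fields that are not the kernel definitions:

/* Stand-alone sketch of the layout behind rt->u.dst -> rt->dst: the
 * dst_entry is embedded as the first member, so &rt->dst and the rtable
 * itself share an address.  Struct contents are simplified stand-ins. */
#include <assert.h>
#include <stdio.h>

struct dst_entry {
	int refcnt;			/* placeholder for the real fields */
};

struct rtable {
	struct dst_entry dst;		/* was: union { struct dst_entry dst; } u; */
	unsigned int rt_flags;
};

static void dst_hold(struct dst_entry *d) { d->refcnt++; }

int main(void)
{
	struct rtable rt = { .dst = { .refcnt = 1 }, .rt_flags = 0 };

	dst_hold(&rt.dst);		/* callers now write &rt->dst, not &rt->u.dst */
	assert((void *)&rt.dst == (void *)&rt);
	printf("refcnt=%d\n", rt.dst.refcnt);
	return 0;
}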
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..b97bb1f30808 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -557,7 +557,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
pr_warning("Freeing alive inet6 address %p\n", ifp);
return;
}
- dst_release(&ifp->rt->u.dst);
+ dst_release(&ifp->rt->dst);
call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
}
@@ -823,7 +823,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
rt->rt6i_flags |= RTF_EXPIRES;
}
}
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
out:
@@ -1863,7 +1863,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
dev, expires, flags);
}
if (rt)
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
/* Try to figure out our local address for this prefix */
@@ -4093,11 +4093,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (ifp->idev->cnf.forwarding)
addrconf_leave_anycast(ifp);
addrconf_leave_solict(ifp->idev, &ifp->addr);
- dst_hold(&ifp->rt->u.dst);
+ dst_hold(&ifp->rt->dst);
if (ifp->state == INET6_IFADDR_STATE_DEAD &&
ip6_del_rt(ifp->rt))
- dst_free(&ifp->rt->u.dst);
+ dst_free(&ifp->rt->dst);
break;
}
}
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8c4348cb1950..f0e774cea386 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -53,11 +53,7 @@ static struct ip6addrlbl_table
static inline
struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
{
-#ifdef CONFIG_NET_NS
- return lbl->lbl_net;
-#else
- return &init_net;
-#endif
+ return read_pnet(&lbl->lbl_net);
}
/*
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e733942dafe1..94b1b9c954bf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -651,7 +651,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
if (dst == NULL) {
struct inet_sock *inet = inet_sk(sk);
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
@@ -665,12 +665,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
- if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, np->opt, &final);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err) {
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index b5b07054508a..0e5e943446f0 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -77,41 +77,40 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
pac->acl_next = NULL;
ipv6_addr_copy(&pac->acl_addr, addr);
+ rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
rt = rt6_lookup(net, addr, NULL, 0, 0);
if (rt) {
dev = rt->rt6i_dev;
- dev_hold(dev);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
} else if (ishost) {
err = -EADDRNOTAVAIL;
- goto out_free_pac;
+ goto error;
} else {
/* router, no matching interface: just pick one */
-
- dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK);
+ dev = dev_get_by_flags_rcu(net, IFF_UP,
+ IFF_UP | IFF_LOOPBACK);
}
} else
- dev = dev_get_by_index(net, ifindex);
+ dev = dev_get_by_index_rcu(net, ifindex);
if (dev == NULL) {
err = -ENODEV;
- goto out_free_pac;
+ goto error;
}
- idev = in6_dev_get(dev);
+ idev = __in6_dev_get(dev);
if (!idev) {
if (ifindex)
err = -ENODEV;
else
err = -EADDRNOTAVAIL;
- goto out_dev_put;
+ goto error;
}
/* reset ishost, now that we have a specific device */
ishost = !idev->cnf.forwarding;
- in6_dev_put(idev);
pac->acl_ifindex = dev->ifindex;
@@ -124,26 +123,22 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
if (ishost)
err = -EADDRNOTAVAIL;
if (err)
- goto out_dev_put;
+ goto error;
}
err = ipv6_dev_ac_inc(dev, addr);
- if (err)
- goto out_dev_put;
-
- write_lock_bh(&ipv6_sk_ac_lock);
- pac->acl_next = np->ipv6_ac_list;
- np->ipv6_ac_list = pac;
- write_unlock_bh(&ipv6_sk_ac_lock);
-
- dev_put(dev);
-
- return 0;
+ if (!err) {
+ write_lock_bh(&ipv6_sk_ac_lock);
+ pac->acl_next = np->ipv6_ac_list;
+ np->ipv6_ac_list = pac;
+ write_unlock_bh(&ipv6_sk_ac_lock);
+ pac = NULL;
+ }
-out_dev_put:
- dev_put(dev);
-out_free_pac:
- sock_kfree_s(sk, pac, sizeof(*pac));
+error:
+ rcu_read_unlock();
+ if (pac)
+ sock_kfree_s(sk, pac, sizeof(*pac));
return err;
}
@@ -176,11 +171,12 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
write_unlock_bh(&ipv6_sk_ac_lock);
- dev = dev_get_by_index(net, pac->acl_ifindex);
- if (dev) {
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
+ if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
- dev_put(dev);
- }
+ rcu_read_unlock();
+
sock_kfree_s(sk, pac, sizeof(*pac));
return 0;
}
@@ -199,13 +195,12 @@ void ipv6_sock_ac_close(struct sock *sk)
write_unlock_bh(&ipv6_sk_ac_lock);
prev_index = 0;
+ rcu_read_lock();
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;
if (pac->acl_ifindex != prev_index) {
- if (dev)
- dev_put(dev);
- dev = dev_get_by_index(net, pac->acl_ifindex);
+ dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
prev_index = pac->acl_ifindex;
}
if (dev)
@@ -213,8 +208,7 @@ void ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
- if (dev)
- dev_put(dev);
+ rcu_read_unlock();
}
#if 0
@@ -250,7 +244,7 @@ static void aca_put(struct ifacaddr6 *ac)
{
if (atomic_dec_and_test(&ac->aca_refcnt)) {
in6_dev_put(ac->aca_idev);
- dst_release(&ac->aca_rt->u.dst);
+ dst_release(&ac->aca_rt->dst);
kfree(ac);
}
}
@@ -356,40 +350,39 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
write_unlock_bh(&idev->lock);
addrconf_leave_solict(idev, &aca->aca_addr);
- dst_hold(&aca->aca_rt->u.dst);
+ dst_hold(&aca->aca_rt->dst);
ip6_del_rt(aca->aca_rt);
aca_put(aca);
return 0;
}
+/* called with rcu_read_lock() */
static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
{
- int ret;
- struct inet6_dev *idev = in6_dev_get(dev);
+ struct inet6_dev *idev = __in6_dev_get(dev);
+
if (idev == NULL)
return -ENODEV;
- ret = __ipv6_dev_ac_dec(idev, addr);
- in6_dev_put(idev);
- return ret;
+ return __ipv6_dev_ac_dec(idev, addr);
}
/*
* check if the interface has this anycast address
+ * called with rcu_read_lock()
*/
static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
{
struct inet6_dev *idev;
struct ifacaddr6 *aca;
- idev = in6_dev_get(dev);
+ idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
for (aca = idev->ac_list; aca; aca = aca->aca_next)
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return aca != NULL;
}
return 0;
@@ -403,14 +396,15 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
{
int found = 0;
- if (dev)
- return ipv6_chk_acast_dev(dev, addr);
rcu_read_lock();
- for_each_netdev_rcu(net, dev)
- if (ipv6_chk_acast_dev(dev, addr)) {
- found = 1;
- break;
- }
+ if (dev)
+ found = ipv6_chk_acast_dev(dev, addr);
+ else
+ for_each_netdev_rcu(net, dev)
+ if (ipv6_chk_acast_dev(dev, addr)) {
+ found = 1;
+ break;
+ }
rcu_read_unlock();
return found;
}
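Beyond swapping dev_get_by_index()/in6_dev_get() for their _rcu/__ lockless counterparts, the anycast.c hunks above also collapse the three unwind labels of ipv6_sock_ac_join() into one error label, with pac set to NULL once ownership passes to the socket list. A stand-alone sketch of that error-path shape, using illustrative stand-in names:

/* Stand-alone sketch of the single-error-label pattern: one unwind point,
 * the lookup done inside one critical section, and the allocation freed
 * only if it was never published.  Names below are illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct acl_entry { int ifindex; struct acl_entry *next; };
static struct acl_entry *acl_list;

static int lookup_ifindex(int ifindex)		/* stands in for the rcu lookup */
{
	return ifindex > 0 ? ifindex : -1;
}

static int ac_join(int ifindex)
{
	struct acl_entry *pac;
	int err = 0;

	pac = malloc(sizeof(*pac));
	if (!pac)
		return -ENOMEM;
	pac->next = NULL;

	/* rcu_read_lock() in the real code */
	pac->ifindex = lookup_ifindex(ifindex);
	if (pac->ifindex < 0) {
		err = -ENODEV;
		goto error;
	}

	/* success: publish and hand ownership to the list */
	pac->next = acl_list;
	acl_list = pac;
	pac = NULL;

error:
	/* rcu_read_unlock() */
	if (pac)			/* still ours only on the failure paths */
		free(pac);
	return err;
}

int main(void)
{
	printf("join(2) -> %d, join(-1) -> %d\n", ac_join(2), ac_join(-1));
	return 0;
}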
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 712684687c9a..7d929a22cbc2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -38,10 +38,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr, *final_p = NULL, final;
+ struct in6_addr *daddr, *final_p, final;
struct dst_entry *dst;
struct flowi fl;
struct ip6_flowlabel *flowlabel = NULL;
+ struct ipv6_txoptions *opt;
int addr_type;
int err;
@@ -155,19 +156,8 @@ ipv4_connected:
security_sk_classify_flow(sk, &fl);
- if (flowlabel) {
- if (flowlabel->opt && flowlabel->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
- } else if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ opt = flowlabel ? flowlabel->opt : np->opt;
+ final_p = fl6_update_dst(&fl, opt, &final);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8a659f92d17a..262f105d23b9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -312,6 +312,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
Routing header.
********************************/
+/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -324,12 +325,9 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
int accept_source_route = net->ipv6.devconf_all->accept_source_route;
- idev = in6_dev_get(skb->dev);
- if (idev) {
- if (accept_source_route > idev->cnf.accept_source_route)
- accept_source_route = idev->cnf.accept_source_route;
- in6_dev_put(idev);
- }
+ idev = __in6_dev_get(skb->dev);
+ if (idev && accept_source_route > idev->cnf.accept_source_route)
+ accept_source_route = idev->cnf.accept_source_route;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -874,3 +872,27 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
return opt;
}
+/**
+ * fl6_update_dst - update flowi destination address with info given
+ * by srcrt option, if any.
+ *
+ * @fl: flowi for which fl6_dst is to be updated
+ * @opt: struct ipv6_txoptions in which to look for srcrt opt
+ * @orig: copy of original fl6_dst address if modified
+ *
+ * Returns NULL if there are no txoptions or no srcrt; otherwise returns orig,
+ * with the initial value of fl->fl6_dst saved in orig.
+ */
+struct in6_addr *fl6_update_dst(struct flowi *fl,
+ const struct ipv6_txoptions *opt,
+ struct in6_addr *orig)
+{
+ if (!opt || !opt->srcrt)
+ return NULL;
+
+ ipv6_addr_copy(orig, &fl->fl6_dst);
+ ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
+ return orig;
+}
+
+EXPORT_SYMBOL_GPL(fl6_update_dst);
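fl6_update_dst(), added above, replaces four open-coded copies of the same source-routing fixup that this series removes from af_inet6.c, datagram.c, inet6_connection_sock.c and raw.c. A self-contained sketch of the helper and its caller pattern, using pared-down stand-in types rather than the real struct flowi, ipv6_txoptions and rt0_hdr:

/* Stand-alone model of the fl6_update_dst() helper factored out above.
 * Types are reduced to the fields involved; they are stand-ins only. */
#include <stdio.h>
#include <string.h>

struct in6_addr { unsigned char s6_addr[16]; };
struct rt0_hdr  { struct in6_addr addr[1]; };		/* first routing-header hop */
struct ipv6_txoptions { struct rt0_hdr *srcrt; };
struct flowi { struct in6_addr fl6_dst; };

static void ipv6_addr_copy(struct in6_addr *dst, const struct in6_addr *src)
{
	memcpy(dst, src, sizeof(*dst));
}

/* Same shape as the new helper: on a source-routed flow, remember the real
 * destination in *orig and aim the flow at the first routing-header hop. */
static struct in6_addr *fl6_update_dst(struct flowi *fl,
				       const struct ipv6_txoptions *opt,
				       struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	ipv6_addr_copy(orig, &fl->fl6_dst);
	ipv6_addr_copy(&fl->fl6_dst, opt->srcrt->addr);
	return orig;
}

int main(void)
{
	struct in6_addr dest, hop, final, *final_p;
	struct rt0_hdr rt0;
	struct ipv6_txoptions opt = { .srcrt = &rt0 };
	struct flowi fl;

	memset(&dest, 0, sizeof(dest)); dest.s6_addr[15] = 1;	/* real destination */
	memset(&hop,  0, sizeof(hop));  hop.s6_addr[15]  = 2;	/* routing-header hop */
	rt0.addr[0] = hop;
	fl.fl6_dst = dest;

	/* Caller pattern now used in datagram.c, af_inet6.c, raw.c, ... */
	final_p = fl6_update_dst(&fl, &opt, &final);
	printf("src-routed: %s, flow now targets hop %u\n",
	       final_p ? "yes" : "no", (unsigned)fl.fl6_dst.s6_addr[15]);
	return 0;
}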
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8e44f8f9c188..b1108ede18e1 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -43,8 +43,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
if (arg.result)
return arg.result;
- dst_hold(&net->ipv6.ip6_null_entry->u.dst);
- return &net->ipv6.ip6_null_entry->u.dst;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -86,7 +86,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct in6_addr saddr;
if (ipv6_dev_get_saddr(net,
- ip6_dst_idev(&rt->u.dst)->dev,
+ ip6_dst_idev(&rt->dst)->dev,
&flp->fl6_dst,
rt6_flags2srcprefs(flags),
&saddr))
@@ -99,12 +99,12 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
goto out;
}
again:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
rt = NULL;
goto out;
discard_pkt:
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
out:
arg->result = rt;
return rt == NULL ? -EAGAIN : 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0c5e3c3b7fd5..8a1628023bd1 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb)
struct ipv6_pinfo *np = inet6_sk(sk);
struct flowi fl;
struct dst_entry *dst;
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
memset(&fl, 0, sizeof(fl));
fl.proto = sk->sk_protocol;
@@ -199,12 +199,7 @@ int inet6_csk_xmit(struct sk_buff *skb)
fl.fl_ip_dport = inet->inet_dport;
security_sk_classify_flow(sk, &fl);
- if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, np->opt, &final);
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 92a122b7795d..b6a585909d35 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -165,7 +165,7 @@ static __inline__ void node_free(struct fib6_node * fn)
static __inline__ void rt6_release(struct rt6_info *rt)
{
if (atomic_dec_and_test(&rt->rt6i_ref))
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
}
static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -278,7 +278,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
int res;
struct rt6_info *rt;
- for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
res = rt6_dump_route(rt, w->args);
if (res < 0) {
/* Frame is full, suspend walking */
@@ -619,7 +619,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
ins = &fn->leaf;
- for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
+ for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
/*
* Search for duplicates
*/
@@ -647,7 +647,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
if (iter->rt6i_metric > rt->rt6i_metric)
break;
- ins = &iter->u.dst.rt6_next;
+ ins = &iter->dst.rt6_next;
}
/* Reset round-robin state, if necessary */
@@ -658,7 +658,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
* insert node
*/
- rt->u.dst.rt6_next = iter;
+ rt->dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
@@ -799,7 +799,7 @@ out:
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
}
return err;
@@ -810,7 +810,7 @@ out:
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
return err;
#endif
}
@@ -1108,7 +1108,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
RT6_TRACE("fib6_del_route\n");
/* Unlink it */
- *rtp = rt->u.dst.rt6_next;
+ *rtp = rt->dst.rt6_next;
rt->rt6i_node = NULL;
net->ipv6.rt6_stats->fib_rt_entries--;
net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1122,14 +1122,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
FOR_WALKERS(w) {
if (w->state == FWS_C && w->leaf == rt) {
RT6_TRACE("walker %p adjusted by delroute\n", w);
- w->leaf = rt->u.dst.rt6_next;
+ w->leaf = rt->dst.rt6_next;
if (w->leaf == NULL)
w->state = FWS_U;
}
}
read_unlock(&fib6_walker_lock);
- rt->u.dst.rt6_next = NULL;
+ rt->dst.rt6_next = NULL;
/* If it was last route, expunge its radix tree node */
if (fn->leaf == NULL) {
@@ -1168,7 +1168,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
- if (rt->u.dst.obsolete>0) {
+ if (rt->dst.obsolete>0) {
WARN_ON(fn != NULL);
return -ENOENT;
}
@@ -1195,7 +1195,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
* Walk the leaf entries looking for ourself
*/
- for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
+ for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
if (*rtp == rt) {
fib6_del_route(fn, rtp, info);
return 0;
@@ -1334,7 +1334,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
.nl_net = c->net,
};
- for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
res = c->func(rt, c->arg);
if (res < 0) {
w->leaf = rt;
@@ -1448,8 +1448,8 @@ static int fib6_age(struct rt6_info *rt, void *arg)
}
gc_args.more++;
} else if (rt->rt6i_flags & RTF_CACHE) {
- if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
- time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
+ if (atomic_read(&rt->dst.__refcnt) == 0 &&
+ time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89425af0684c..d40b330c0ee6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -698,7 +698,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
for (;;) {
/* Prepare header of the next frame,
@@ -726,7 +726,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
err = output(skb);
if(!err)
- IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+ IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
@@ -740,9 +740,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
kfree(tmp_hdr);
if (err == 0) {
- IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+ IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
return 0;
}
@@ -752,9 +752,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
frag = skb;
}
- IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst),
+ IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
return err;
}
@@ -785,7 +785,7 @@ slow_path:
* Allocate buffer.
*/
- if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
+ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
@@ -798,7 +798,7 @@ slow_path:
*/
ip6_copy_metadata(frag, skb);
- skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
+ skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
skb_put(frag, len + hlen + sizeof(struct frag_hdr));
skb_reset_network_header(frag);
fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -1156,24 +1156,24 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
/* need source address above miyazawa*/
}
- dst_hold(&rt->u.dst);
- inet->cork.dst = &rt->u.dst;
+ dst_hold(&rt->dst);
+ inet->cork.dst = &rt->dst;
inet->cork.fl = *fl;
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path);
+ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
inet->cork.fragsize = mtu;
- if (dst_allfrag(rt->u.dst.path))
+ if (dst_allfrag(rt->dst.path))
inet->cork.flags |= IPCORK_ALLFRAG;
inet->cork.length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) -
+ exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
rt->rt6i_nfheader_len;
length += exthdrlen;
transhdrlen += exthdrlen;
@@ -1186,7 +1186,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
mtu = inet->cork.fragsize;
}
- hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
@@ -1224,7 +1224,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
}
if (proto == IPPROTO_UDP &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
+ (rt->dst.dev->features & NETIF_F_UFO)) {
err = ip6_ufo_append_data(sk, getfrag, from, length,
hh_len, fragheaderlen,
@@ -1270,7 +1270,7 @@ alloc_new_skb:
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
- !(rt->u.dst.dev->features&NETIF_F_SG))
+ !(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = datalen + fragheaderlen;
@@ -1281,7 +1281,7 @@ alloc_new_skb:
* because we have no idea if we're the last one.
*/
if (datalen == length + fraggap)
- alloclen += rt->u.dst.trailer_len;
+ alloclen += rt->dst.trailer_len;
/*
* We just reserve space for fragment header.
@@ -1358,7 +1358,7 @@ alloc_new_skb:
if (copy > length)
copy = length;
- if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
+ if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
@@ -1503,7 +1503,7 @@ int ip6_push_pending_frames(struct sock *sk)
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set(skb, dst_clone(&rt->dst));
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
if (proto == IPPROTO_ICMPV6) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8f39893d8081..0fd027f3f47e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -552,7 +552,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
goto out;
- skb2->dev = rt->u.dst.dev;
+ skb2->dev = rt->dst.dev;
/* route "incoming" packet */
if (rt->rt_flags & RTCF_LOCAL) {
@@ -562,7 +562,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
fl.fl4_src = eiph->saddr;
fl.fl4_tos = eiph->tos;
if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
- rt->u.dst.dev->type != ARPHRD_TUNNEL) {
+ rt->dst.dev->type != ARPHRD_TUNNEL) {
ip_rt_put(rt);
goto out;
}
@@ -626,7 +626,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
icmpv6_send(skb2, rel_type, rel_code, rel_info);
if (rt)
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
kfree_skb(skb2);
}
@@ -1135,7 +1135,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ab1622d7d409..d1444b95ad7e 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -152,18 +152,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst->next = NULL;
ipv6_addr_copy(&mc_lst->addr, addr);
+ rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
rt = rt6_lookup(net, addr, NULL, 0, 0);
if (rt) {
dev = rt->rt6i_dev;
- dev_hold(dev);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
} else
- dev = dev_get_by_index(net, ifindex);
+ dev = dev_get_by_index_rcu(net, ifindex);
if (dev == NULL) {
+ rcu_read_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
}
@@ -180,8 +181,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
err = ipv6_dev_mc_inc(dev, addr);
if (err) {
+ rcu_read_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
- dev_put(dev);
return err;
}
@@ -190,7 +191,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
np->ipv6_mc_list = mc_lst;
write_unlock_bh(&ipv6_sk_mc_lock);
- dev_put(dev);
+ rcu_read_unlock();
return 0;
}
@@ -213,18 +214,17 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
*lnk = mc_lst->next;
write_unlock_bh(&ipv6_sk_mc_lock);
- dev = dev_get_by_index(net, mc_lst->ifindex);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
if (dev != NULL) {
- struct inet6_dev *idev = in6_dev_get(dev);
+ struct inet6_dev *idev = __in6_dev_get(dev);
(void) ip6_mc_leave_src(sk, mc_lst, idev);
- if (idev) {
+ if (idev)
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
- in6_dev_put(idev);
- }
- dev_put(dev);
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
+ rcu_read_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return 0;
}
@@ -234,43 +234,36 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -EADDRNOTAVAIL;
}
-static struct inet6_dev *ip6_mc_find_dev(struct net *net,
- struct in6_addr *group,
- int ifindex)
+/* called with rcu_read_lock() */
+static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
+ struct in6_addr *group,
+ int ifindex)
{
struct net_device *dev = NULL;
struct inet6_dev *idev = NULL;
if (ifindex == 0) {
- struct rt6_info *rt;
+ struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
- rt = rt6_lookup(net, group, NULL, 0, 0);
if (rt) {
dev = rt->rt6i_dev;
dev_hold(dev);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
} else
- dev = dev_get_by_index(net, ifindex);
+ dev = dev_get_by_index_rcu(net, ifindex);
if (!dev)
- goto nodev;
- idev = in6_dev_get(dev);
+ return NULL;
+ idev = __in6_dev_get(dev);
if (!idev)
- goto release;
+ return NULL;
read_lock_bh(&idev->lock);
- if (idev->dead)
- goto unlock_release;
-
+ if (idev->dead) {
+ read_unlock_bh(&idev->lock);
+ return NULL;
+ }
return idev;
-
-unlock_release:
- read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
-release:
- dev_put(dev);
-nodev:
- return NULL;
}
void ipv6_sock_mc_close(struct sock *sk)
@@ -286,19 +279,17 @@ void ipv6_sock_mc_close(struct sock *sk)
np->ipv6_mc_list = mc_lst->next;
write_unlock_bh(&ipv6_sk_mc_lock);
- dev = dev_get_by_index(net, mc_lst->ifindex);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
if (dev) {
- struct inet6_dev *idev = in6_dev_get(dev);
+ struct inet6_dev *idev = __in6_dev_get(dev);
(void) ip6_mc_leave_src(sk, mc_lst, idev);
- if (idev) {
+ if (idev)
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
- in6_dev_put(idev);
- }
- dev_put(dev);
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
-
+ rcu_read_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
write_lock_bh(&ipv6_sk_mc_lock);
@@ -327,14 +318,17 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
if (!ipv6_addr_is_multicast(group))
return -EINVAL;
- idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
- if (!idev)
+ rcu_read_lock();
+ idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
+ if (!idev) {
+ rcu_read_unlock();
return -ENODEV;
+ }
dev = idev->dev;
err = -EADDRNOTAVAIL;
- read_lock_bh(&ipv6_sk_mc_lock);
+ read_lock(&ipv6_sk_mc_lock);
for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
continue;
@@ -358,7 +352,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
pmc->sfmode = omode;
}
- write_lock_bh(&pmc->sflock);
+ write_lock(&pmc->sflock);
pmclocked = 1;
psl = pmc->sflist;
@@ -433,11 +427,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
if (pmclocked)
- write_unlock_bh(&pmc->sflock);
- read_unlock_bh(&ipv6_sk_mc_lock);
+ write_unlock(&pmc->sflock);
+ read_unlock(&ipv6_sk_mc_lock);
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
- dev_put(dev);
+ rcu_read_unlock();
if (leavegroup)
return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
return err;
@@ -463,14 +456,17 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
gsf->gf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
+ rcu_read_lock();
+ idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
- if (!idev)
+ if (!idev) {
+ rcu_read_unlock();
return -ENODEV;
+ }
dev = idev->dev;
err = 0;
- read_lock_bh(&ipv6_sk_mc_lock);
+ read_lock(&ipv6_sk_mc_lock);
if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
leavegroup = 1;
@@ -512,7 +508,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
}
- write_lock_bh(&pmc->sflock);
+ write_lock(&pmc->sflock);
psl = pmc->sflist;
if (psl) {
(void) ip6_mc_del_src(idev, group, pmc->sfmode,
@@ -522,13 +518,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
pmc->sflist = newpsl;
pmc->sfmode = gsf->gf_fmode;
- write_unlock_bh(&pmc->sflock);
+ write_unlock(&pmc->sflock);
err = 0;
done:
- read_unlock_bh(&ipv6_sk_mc_lock);
+ read_unlock(&ipv6_sk_mc_lock);
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
- dev_put(dev);
+ rcu_read_unlock();
if (leavegroup)
err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
return err;
@@ -551,11 +546,13 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
if (!ipv6_addr_is_multicast(group))
return -EINVAL;
- idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
+ rcu_read_lock();
+ idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
- if (!idev)
+ if (!idev) {
+ rcu_read_unlock();
return -ENODEV;
-
+ }
dev = idev->dev;
err = -EADDRNOTAVAIL;
@@ -577,8 +574,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
psl = pmc->sflist;
count = psl ? psl->sl_count : 0;
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
- dev_put(dev);
+ rcu_read_unlock();
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
gsf->gf_numsrc = count;
@@ -604,8 +600,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
return 0;
done:
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
- dev_put(dev);
+ rcu_read_unlock();
return err;
}
@@ -822,6 +817,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
struct ifmcaddr6 *mc;
struct inet6_dev *idev;
+ /* we need to take a reference on idev */
idev = in6_dev_get(dev);
if (idev == NULL)
@@ -860,7 +856,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
ipv6_addr_copy(&mc->mca_addr, addr);
- mc->idev = idev;
+ mc->idev = idev; /* (reference taken) */
mc->mca_users = 1;
/* mca_stamp should be updated upon changes */
mc->mca_cstamp = mc->mca_tstamp = jiffies;
@@ -915,16 +911,18 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
- struct inet6_dev *idev = in6_dev_get(dev);
+ struct inet6_dev *idev;
int err;
- if (!idev)
- return -ENODEV;
-
- err = __ipv6_dev_mc_dec(idev, addr);
+ rcu_read_lock();
- in6_dev_put(idev);
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ err = -ENODEV;
+ else
+ err = __ipv6_dev_mc_dec(idev, addr);
+ rcu_read_unlock();
return err;
}
@@ -965,7 +963,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
struct ifmcaddr6 *mc;
int rv = 0;
- idev = in6_dev_get(dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
for (mc = idev->mc_list; mc; mc=mc->next) {
@@ -992,8 +991,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
rv = 1; /* don't filter unspecified source */
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
}
+ rcu_read_unlock();
return rv;
}
@@ -1104,6 +1103,7 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
return 1;
}
+/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
struct mld2_query *mlh2 = NULL;
@@ -1127,7 +1127,7 @@ int igmp6_event_query(struct sk_buff *skb)
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
if (idev == NULL)
return 0;
@@ -1137,10 +1137,8 @@ int igmp6_event_query(struct sk_buff *skb)
group_type = ipv6_addr_type(group);
if (group_type != IPV6_ADDR_ANY &&
- !(group_type&IPV6_ADDR_MULTICAST)) {
- in6_dev_put(idev);
+ !(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- }
if (len == 24) {
int switchback;
@@ -1161,10 +1159,9 @@ int igmp6_event_query(struct sk_buff *skb)
} else if (len >= 28) {
int srcs_offset = sizeof(struct mld2_query) -
sizeof(struct icmp6hdr);
- if (!pskb_may_pull(skb, srcs_offset)) {
- in6_dev_put(idev);
+ if (!pskb_may_pull(skb, srcs_offset))
return -EINVAL;
- }
+
mlh2 = (struct mld2_query *)skb_transport_header(skb);
max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
if (!max_delay)
@@ -1173,28 +1170,23 @@ int igmp6_event_query(struct sk_buff *skb)
if (mlh2->mld2q_qrv)
idev->mc_qrv = mlh2->mld2q_qrv;
if (group_type == IPV6_ADDR_ANY) { /* general query */
- if (mlh2->mld2q_nsrcs) {
- in6_dev_put(idev);
+ if (mlh2->mld2q_nsrcs)
return -EINVAL; /* no sources allowed */
- }
+
mld_gq_start_timer(idev);
- in6_dev_put(idev);
return 0;
}
/* mark sources to include, if group & source-specific */
if (mlh2->mld2q_nsrcs != 0) {
if (!pskb_may_pull(skb, srcs_offset +
- ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) {
- in6_dev_put(idev);
+ ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
return -EINVAL;
- }
+
mlh2 = (struct mld2_query *)skb_transport_header(skb);
mark = 1;
}
- } else {
- in6_dev_put(idev);
+ } else
return -EINVAL;
- }
read_lock_bh(&idev->lock);
if (group_type == IPV6_ADDR_ANY) {
@@ -1227,12 +1219,11 @@ int igmp6_event_query(struct sk_buff *skb)
}
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return 0;
}
-
+/* called with rcu_read_lock() */
int igmp6_event_report(struct sk_buff *skb)
{
struct ifmcaddr6 *ma;
@@ -1260,7 +1251,7 @@ int igmp6_event_report(struct sk_buff *skb)
!(addr_type&IPV6_ADDR_LINKLOCAL))
return -EINVAL;
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
if (idev == NULL)
return -ENODEV;
@@ -1280,7 +1271,6 @@ int igmp6_event_report(struct sk_buff *skb)
}
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return 0;
}
@@ -1396,12 +1386,14 @@ static void mld_sendpack(struct sk_buff *skb)
struct mld2_report *pmr =
(struct mld2_report *)skb_transport_header(skb);
int payload_len, mldlen;
- struct inet6_dev *idev = in6_dev_get(skb->dev);
+ struct inet6_dev *idev;
struct net *net = dev_net(skb->dev);
int err;
struct flowi fl;
struct dst_entry *dst;
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
@@ -1441,8 +1433,7 @@ out:
} else
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
return;
err_out:
@@ -1779,7 +1770,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
IPPROTO_ICMPV6,
csum_partial(hdr, len, 0));
- idev = in6_dev_get(skb->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
if (!dst) {
@@ -1806,8 +1798,7 @@ out:
} else
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
return;
err_out:
@@ -1998,8 +1989,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
&psf->sf_addr))
break;
if (!dpsf) {
- dpsf = (struct ip6_sf_list *)
- kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+ dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
if (!dpsf)
continue;
*dpsf = *psf;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..1fc46fc60efd 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1229,7 +1229,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: %s() got default router without neighbour.\n",
__func__);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
in6_dev_put(in6_dev);
return;
}
@@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
if (rt)
- rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
+ rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
}
skip_defrtr:
@@ -1363,7 +1363,7 @@ skip_linkparms:
in6_dev->cnf.mtu6 = mtu;
if (rt)
- rt->u.dst.metrics[RTAX_MTU-1] = mtu;
+ rt->dst.metrics[RTAX_MTU-1] = mtu;
rt6_mtu_change(skb->dev, mtu);
}
@@ -1384,7 +1384,7 @@ skip_linkparms:
}
out:
if (rt)
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
else if (neigh)
neigh_release(neigh);
in6_dev_put(in6_dev);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index a74951c039b6..7155b2451d7c 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -151,9 +151,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
protocol,
csum_sub(0, hsum)));
skb->ip_summed = CHECKSUM_NONE;
- csum = __skb_checksum_complete_head(skb, dataoff + len);
- if (!csum)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return __skb_checksum_complete_head(skb, dataoff + len);
}
return csum;
};
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8c201743d96d..413ab0754e1f 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
@@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
break;
case IPQ_COPY_PACKET:
- copy_mode = mode;
+ if (range > 0xFFFF)
+ range = 0xFFFF;
copy_range = range;
- if (copy_range > 0xFFFF)
- copy_range = 0xFFFF;
+ copy_mode = mode;
break;
default:
@@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id)
{
struct nf_queue_entry *entry = NULL, *i;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
list_for_each_entry(i, &queue_list, list) {
if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id)
queue_total--;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return entry;
}
@@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
__ipq_flush(cmpfn, data);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
static struct sk_buff *
@@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
struct nlmsghdr *nlh;
struct timeval tv;
- read_lock_bh(&queue_lock);
-
- switch (copy_mode) {
+ switch (ACCESS_ONCE(copy_mode)) {
case IPQ_COPY_META:
case IPQ_COPY_NONE:
size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
case IPQ_COPY_PACKET:
if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
- (*errp = skb_checksum_help(entry->skb))) {
- read_unlock_bh(&queue_lock);
+ (*errp = skb_checksum_help(entry->skb)))
return NULL;
- }
- if (copy_range == 0 || copy_range > entry->skb->len)
+
+ data_len = ACCESS_ONCE(copy_range);
+ if (data_len == 0 || data_len > entry->skb->len)
data_len = entry->skb->len;
- else
- data_len = copy_range;
size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
break;
default:
*errp = -EINVAL;
- read_unlock_bh(&queue_lock);
return NULL;
}
- read_unlock_bh(&queue_lock);
-
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
if (nskb == NULL)
return status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
__ipq_enqueue_entry(entry);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
{
int status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
status = __ipq_set_mode(mode, range);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
if (security_netlink_recv(skb, CAP_NET_ADMIN))
RCV_SKB_FAIL(-EPERM);
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (peer_pid) {
if (peer_pid != pid) {
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
RCV_SKB_FAIL(-EBUSY);
}
} else {
@@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
peer_pid = pid;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
struct netlink_notify *n = ptr;
if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
return NOTIFY_DONE;
}
@@ -528,7 +521,7 @@ static ctl_table ipq_table[] = {
#ifdef CONFIG_PROC_FS
static int ip6_queue_show(struct seq_file *m, void *v)
{
- read_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
seq_printf(m,
"Peer PID : %d\n"
@@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v)
queue_dropped,
queue_user_dropped);
- read_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return 0;
}
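The ip6_queue.c changes above drop the rwlock in favour of a plain spinlock for writers and let ipq_build_packet_message() read copy_mode/copy_range locklessly via ACCESS_ONCE(); that is why __ipq_set_mode() now clamps and stores copy_range before publishing copy_mode. A stand-alone model of that ordering, with ACCESS_ONCE() spelled out in its classic volatile-cast form and the locking only indicated in comments:

/* Stand-alone sketch of the lockless-reader ordering: the writer publishes
 * the clamped range before the mode the reader keys off.  The spinlock and
 * netlink plumbing of the real code are only hinted at in comments. */
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

enum { IPQ_COPY_NONE, IPQ_COPY_META, IPQ_COPY_PACKET };

static unsigned char copy_mode = IPQ_COPY_NONE;
static unsigned int  copy_range;

static void ipq_set_mode(unsigned char mode, unsigned int range)
{
	/* spin_lock_bh(&queue_lock) in the real code */
	if (mode == IPQ_COPY_PACKET) {
		if (range > 0xFFFF)
			range = 0xFFFF;
		copy_range = range;	/* publish the range first ... */
		copy_mode = mode;	/* ... then the mode readers test */
	}
	/* spin_unlock_bh(&queue_lock) */
}

static unsigned int reader(unsigned int skb_len)
{
	unsigned int data_len = 0;

	switch (ACCESS_ONCE(copy_mode)) {	/* lockless snapshot */
	case IPQ_COPY_PACKET:
		data_len = ACCESS_ONCE(copy_range);
		if (data_len == 0 || data_len > skb_len)
			data_len = skb_len;
		break;
	default:
		break;
	}
	return data_len;
}

int main(void)
{
	ipq_set_mode(IPQ_COPY_PACKET, 1 << 20);
	printf("copy %u bytes of a 2000-byte packet\n", reader(2000));
	return 0;
}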
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d2d68f0e605..dc41d6d3c6c6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -943,7 +943,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
(other than comefrom, which userspace doesn't care
about). */
countersize = sizeof(struct xt_counters) * private->number;
- counters = vmalloc_node(countersize, numa_node_id());
+ counters = vmalloc(countersize);
if (counters == NULL)
return ERR_PTR(-ENOMEM);
@@ -1213,8 +1213,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct ip6t_entry *iter;
ret = 0;
- counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
- numa_node_id());
+ counters = vmalloc(num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto out;
@@ -1368,7 +1367,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
if (len != size + num_counters * sizeof(struct xt_counters))
return -EINVAL;
- paddc = vmalloc_node(len - size, numa_node_id());
+ paddc = vmalloc(len - size);
if (!paddc)
return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9be81776415e..1df3c8b6bf47 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -208,7 +208,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
type = icmp6h->icmp6_type - 130;
if (type >= 0 && type < sizeof(noct_valid_new) &&
noct_valid_new[type]) {
- skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
return NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6fb890187de0..9254008602d4 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -114,10 +114,8 @@ static void nf_skb_free(struct sk_buff *skb)
}
/* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
+static void frag_kfree_skb(struct sk_buff *skb)
{
- if (work)
- *work -= skb->truesize;
atomic_sub(skb->truesize, &nf_init_frags.mem);
nf_skb_free(skb);
kfree_skb(skb);
@@ -335,7 +333,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
fq->q.fragments = next;
fq->q.meat -= free_it->len;
- frag_kfree_skb(free_it, NULL);
+ frag_kfree_skb(free_it);
}
}
@@ -442,7 +440,6 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
skb_shinfo(head)->frag_list = head->next;
skb_reset_transport_header(head);
skb_push(head, head->data - skb_network_header(head));
- atomic_sub(head->truesize, &nf_init_frags.mem);
for (fp=head->next; fp; fp = fp->next) {
head->data_len += fp->len;
@@ -452,8 +449,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
- atomic_sub(fp->truesize, &nf_init_frags.mem);
}
+ atomic_sub(head->truesize, &nf_init_frags.mem);
head->next = NULL;
head->dev = dev;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a4dcbe4f8b2..e677937a07fc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -602,31 +602,33 @@ out:
}
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
- struct flowi *fl, struct rt6_info *rt,
+ struct flowi *fl, struct dst_entry **dstp,
unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
+ struct rt6_info *rt = (struct rt6_info *)*dstp;
- if (length > rt->u.dst.dev->mtu) {
- ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
+ if (length > rt->dst.dev->mtu) {
+ ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+ length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+ skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set(skb, &rt->dst);
+ *dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
@@ -641,7 +643,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- rt->u.dst.dev, dst_output);
+ rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
@@ -725,7 +727,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
{
struct ipv6_txoptions opt_space;
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
- struct in6_addr *daddr, *final_p = NULL, final;
+ struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
@@ -847,13 +849,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- /* merge ip6_build_xmit from ip6_output */
- if (opt && opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, opt, &final);
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
@@ -892,9 +888,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
goto do_confirm;
back_from_confirm:
- if (inet->hdrincl) {
- err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags);
- } else {
+ if (inet->hdrincl)
+ err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags);
+ else {
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6d4292ff5854..0b97230a3251 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -150,11 +150,8 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
EXPORT_SYMBOL(ip6_frag_match);
/* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct netns_frags *nf,
- struct sk_buff *skb, int *work)
+static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
- if (work)
- *work -= skb->truesize;
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
}
@@ -392,7 +389,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
fq->q.fragments = next;
fq->q.meat -= free_it->len;
- frag_kfree_skb(fq->q.net, free_it, NULL);
+ frag_kfree_skb(fq->q.net, free_it);
}
}
@@ -524,7 +521,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_shinfo(head)->frag_list = head->next;
skb_reset_transport_header(head);
skb_push(head, head->data - skb_network_header(head));
- atomic_sub(head->truesize, &fq->q.net->mem);
for (fp=head->next; fp; fp = fp->next) {
head->data_len += fp->len;
@@ -534,8 +530,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
- atomic_sub(fp->truesize, &fq->q.net->mem);
}
+ atomic_sub(head->truesize, &fq->q.net->mem);
head->next = NULL;
head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 252d76199c41..8f2d0400cf8a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -126,16 +126,14 @@ static struct dst_ops ip6_dst_blackhole_ops = {
};
static struct rt6_info ip6_null_entry_template = {
- .u = {
- .dst = {
- .__refcnt = ATOMIC_INIT(1),
- .__use = 1,
- .obsolete = -1,
- .error = -ENETUNREACH,
- .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
- .input = ip6_pkt_discard,
- .output = ip6_pkt_discard_out,
- }
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+ .__use = 1,
+ .obsolete = -1,
+ .error = -ENETUNREACH,
+ .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
+ .input = ip6_pkt_discard,
+ .output = ip6_pkt_discard_out,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
@@ -149,16 +147,14 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct sk_buff *skb);
static struct rt6_info ip6_prohibit_entry_template = {
- .u = {
- .dst = {
- .__refcnt = ATOMIC_INIT(1),
- .__use = 1,
- .obsolete = -1,
- .error = -EACCES,
- .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
- .input = ip6_pkt_prohibit,
- .output = ip6_pkt_prohibit_out,
- }
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+ .__use = 1,
+ .obsolete = -1,
+ .error = -EACCES,
+ .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
+ .input = ip6_pkt_prohibit,
+ .output = ip6_pkt_prohibit_out,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
@@ -167,16 +163,14 @@ static struct rt6_info ip6_prohibit_entry_template = {
};
static struct rt6_info ip6_blk_hole_entry_template = {
- .u = {
- .dst = {
- .__refcnt = ATOMIC_INIT(1),
- .__use = 1,
- .obsolete = -1,
- .error = -EINVAL,
- .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
- .input = dst_discard,
- .output = dst_discard,
- }
+ .dst = {
+ .__refcnt = ATOMIC_INIT(1),
+ .__use = 1,
+ .obsolete = -1,
+ .error = -EINVAL,
+ .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
+ .input = dst_discard,
+ .output = dst_discard,
},
.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
.rt6i_protocol = RTPROT_KERNEL,
@@ -249,7 +243,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
if (!oif && ipv6_addr_any(saddr))
goto out;
- for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
+ for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
struct net_device *dev = sprt->rt6i_dev;
if (oif) {
@@ -407,10 +401,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
match = NULL;
for (rt = rr_head; rt && rt->rt6i_metric == metric;
- rt = rt->u.dst.rt6_next)
+ rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
- rt = rt->u.dst.rt6_next)
+ rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match);
return match;
@@ -432,7 +426,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
if (!match &&
(strict & RT6_LOOKUP_F_REACHABLE)) {
- struct rt6_info *next = rt0->u.dst.rt6_next;
+ struct rt6_info *next = rt0->dst.rt6_next;
/* no entries matched; do round-robin */
if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -517,7 +511,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
rt->rt6i_expires = jiffies + HZ * lifetime;
rt->rt6i_flags |= RTF_EXPIRES;
}
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
return 0;
}
@@ -555,7 +549,7 @@ restart:
rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
BACKTRACK(net, &fl->fl6_src);
out:
- dst_use(&rt->u.dst, jiffies);
+ dst_use(&rt->dst, jiffies);
read_unlock_bh(&table->tb6_lock);
return rt;
@@ -643,7 +637,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
- rt->u.dst.flags |= DST_HOST;
+ rt->dst.flags |= DST_HOST;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
@@ -677,7 +671,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
if (net_ratelimit())
printk(KERN_WARNING
"Neighbour table overflow.\n");
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
return NULL;
}
rt->rt6i_nexthop = neigh;
@@ -694,7 +688,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
- rt->u.dst.flags |= DST_HOST;
+ rt->dst.flags |= DST_HOST;
rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
}
return rt;
@@ -726,7 +720,7 @@ restart:
rt->rt6i_flags & RTF_CACHE)
goto out;
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
@@ -739,10 +733,10 @@ restart:
#endif
}
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
rt = nrt ? : net->ipv6.ip6_null_entry;
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
if (nrt) {
err = ip6_ins_rt(nrt);
if (!err)
@@ -756,7 +750,7 @@ restart:
* Race condition! In the gap, when table->tb6_lock was
* released someone could insert this route. Relookup.
*/
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
goto relookup;
out:
@@ -764,11 +758,11 @@ out:
reachable = 0;
goto restart_2;
}
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
out2:
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.__use++;
+ rt->dst.lastuse = jiffies;
+ rt->dst.__use++;
return rt;
}
@@ -835,15 +829,15 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
struct dst_entry *new = NULL;
if (rt) {
- new = &rt->u.dst;
+ new = &rt->dst;
atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
- new->dev = ort->u.dst.dev;
+ memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+ new->dev = ort->dst.dev;
if (new->dev)
dev_hold(new->dev);
rt->rt6i_idev = ort->rt6i_idev;
@@ -912,7 +906,7 @@ static void ip6_link_failure(struct sk_buff *skb)
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
if (rt->rt6i_flags&RTF_CACHE) {
- dst_set_expires(&rt->u.dst, 0);
+ dst_set_expires(&rt->dst, 0);
rt->rt6i_flags |= RTF_EXPIRES;
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
rt->rt6i_node->fn_sernum = -1;
@@ -986,14 +980,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->rt6i_dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_nexthop = neigh;
- atomic_set(&rt->u.dst.__refcnt, 1);
- rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
- rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
- rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
- rt->u.dst.output = ip6_output;
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
+ rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+ rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+ rt->dst.output = ip6_output;
#if 0 /* there's no chance to use these for ndisc */
- rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
+ rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
? DST_HOST
: 0;
ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
@@ -1001,14 +995,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
#endif
spin_lock_bh(&icmp6_dst_lock);
- rt->u.dst.next = icmp6_dst_gc_list;
- icmp6_dst_gc_list = &rt->u.dst;
+ rt->dst.next = icmp6_dst_gc_list;
+ icmp6_dst_gc_list = &rt->dst;
spin_unlock_bh(&icmp6_dst_lock);
fib6_force_start_gc(net);
out:
- return &rt->u.dst;
+ return &rt->dst;
}
int icmp6_dst_gc(void)
@@ -1090,11 +1084,11 @@ static int ipv6_get_mtu(struct net_device *dev)
int mtu = IPV6_MIN_MTU;
struct inet6_dev *idev;
- idev = in6_dev_get(dev);
- if (idev) {
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev)
mtu = idev->cnf.mtu6;
- in6_dev_put(idev);
- }
+ rcu_read_unlock();
return mtu;
}
@@ -1103,12 +1097,15 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
if (hoplimit < 0) {
struct net_device *dev = dst->dev;
- struct inet6_dev *idev = in6_dev_get(dev);
- if (idev) {
+ struct inet6_dev *idev;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev)
hoplimit = idev->cnf.hop_limit;
- in6_dev_put(idev);
- } else
+ else
hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ rcu_read_unlock();
}
return hoplimit;
}
@@ -1159,7 +1156,7 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
- rt->u.dst.obsolete = -1;
+ rt->dst.obsolete = -1;
rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
jiffies + clock_t_to_jiffies(cfg->fc_expires) :
0;
@@ -1171,16 +1168,16 @@ int ip6_route_add(struct fib6_config *cfg)
addr_type = ipv6_addr_type(&cfg->fc_dst);
if (addr_type & IPV6_ADDR_MULTICAST)
- rt->u.dst.input = ip6_mc_input;
+ rt->dst.input = ip6_mc_input;
else
- rt->u.dst.input = ip6_forward;
+ rt->dst.input = ip6_forward;
- rt->u.dst.output = ip6_output;
+ rt->dst.output = ip6_output;
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
- rt->u.dst.flags = DST_HOST;
+ rt->dst.flags = DST_HOST;
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1208,9 +1205,9 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
}
- rt->u.dst.output = ip6_pkt_discard_out;
- rt->u.dst.input = ip6_pkt_discard;
- rt->u.dst.error = -ENETUNREACH;
+ rt->dst.output = ip6_pkt_discard_out;
+ rt->dst.input = ip6_pkt_discard;
+ rt->dst.error = -ENETUNREACH;
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
goto install_route;
}
@@ -1244,7 +1241,7 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
if (dev) {
if (dev != grt->rt6i_dev) {
- dst_release(&grt->u.dst);
+ dst_release(&grt->dst);
goto out;
}
} else {
@@ -1255,7 +1252,7 @@ int ip6_route_add(struct fib6_config *cfg)
}
if (!(grt->rt6i_flags&RTF_GATEWAY))
err = 0;
- dst_release(&grt->u.dst);
+ dst_release(&grt->dst);
if (err)
goto out;
@@ -1294,18 +1291,18 @@ install_route:
goto out;
}
- rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
+ rt->dst.metrics[type - 1] = nla_get_u32(nla);
}
}
}
- if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
- rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
- if (!dst_mtu(&rt->u.dst))
- rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
- if (!dst_metric(&rt->u.dst, RTAX_ADVMSS))
- rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
- rt->u.dst.dev = dev;
+ if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+ rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+ if (!dst_mtu(&rt->dst))
+ rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
+ if (!dst_metric(&rt->dst, RTAX_ADVMSS))
+ rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+ rt->dst.dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
@@ -1319,7 +1316,7 @@ out:
if (idev)
in6_dev_put(idev);
if (rt)
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
return err;
}
@@ -1336,7 +1333,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
write_lock_bh(&table->tb6_lock);
err = fib6_del(rt, info);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
write_unlock_bh(&table->tb6_lock);
@@ -1369,7 +1366,7 @@ static int ip6_route_del(struct fib6_config *cfg)
&cfg->fc_src, cfg->fc_src_len);
if (fn) {
- for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (cfg->fc_ifindex &&
(rt->rt6i_dev == NULL ||
rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1379,7 +1376,7 @@ static int ip6_route_del(struct fib6_config *cfg)
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
return __ip6_del_rt(rt, &cfg->fc_nlinfo);
@@ -1421,7 +1418,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
restart:
- for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
/*
* Current route is on-link; redirect is always invalid.
*
@@ -1445,7 +1442,7 @@ restart:
rt = net->ipv6.ip6_null_entry;
BACKTRACK(net, &fl->fl6_src);
out:
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
@@ -1513,10 +1510,10 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
* Look, redirects are sent only in response to data packets,
* so that this nexthop apparently is reachable. --ANK
*/
- dst_confirm(&rt->u.dst);
+ dst_confirm(&rt->dst);
/* Duplicate redirect: silently ignore. */
- if (neigh == rt->u.dst.neighbour)
+ if (neigh == rt->dst.neighbour)
goto out;
nrt = ip6_rt_copy(rt);
@@ -1529,20 +1526,20 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
nrt->rt6i_dst.plen = 128;
- nrt->u.dst.flags |= DST_HOST;
+ nrt->dst.flags |= DST_HOST;
ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
nrt->rt6i_nexthop = neigh_clone(neigh);
/* Reset pmtu, it may be better */
- nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
- nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
- dst_mtu(&nrt->u.dst));
+ nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
+ nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
+ dst_mtu(&nrt->dst));
if (ip6_ins_rt(nrt))
goto out;
- netevent.old = &rt->u.dst;
- netevent.new = &nrt->u.dst;
+ netevent.old = &rt->dst;
+ netevent.new = &nrt->dst;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
if (rt->rt6i_flags&RTF_CACHE) {
@@ -1551,7 +1548,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
}
out:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
/*
@@ -1570,7 +1567,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
if (rt == NULL)
return;
- if (pmtu >= dst_mtu(&rt->u.dst))
+ if (pmtu >= dst_mtu(&rt->dst))
goto out;
if (pmtu < IPV6_MIN_MTU) {
@@ -1588,7 +1585,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
They are sent only in response to data packets,
so that this nexthop apparently is reachable. --ANK
*/
- dst_confirm(&rt->u.dst);
+ dst_confirm(&rt->dst);
/* Host route. If it is static, it would be better
not to override it, but add new one, so that
@@ -1596,10 +1593,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
would return automatically.
*/
if (rt->rt6i_flags & RTF_CACHE) {
- rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
+ rt->dst.metrics[RTAX_MTU-1] = pmtu;
if (allfrag)
- rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
- dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+ dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
goto out;
}
@@ -1615,9 +1612,9 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
nrt = rt6_alloc_clone(rt, daddr);
if (nrt) {
- nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
+ nrt->dst.metrics[RTAX_MTU-1] = pmtu;
if (allfrag)
- nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+ nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
/* According to RFC 1981, detecting PMTU increase shouldn't be
* happened within 5 mins, the recommended timer is 10 mins.
@@ -1625,13 +1622,13 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
* which is 10 mins. After 10 mins the decreased pmtu is expired
* and detecting PMTU increase will be automatically happened.
*/
- dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
ip6_ins_rt(nrt);
}
out:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
}
/*
@@ -1644,18 +1641,18 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
if (rt) {
- rt->u.dst.input = ort->u.dst.input;
- rt->u.dst.output = ort->u.dst.output;
-
- memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
- rt->u.dst.error = ort->u.dst.error;
- rt->u.dst.dev = ort->u.dst.dev;
- if (rt->u.dst.dev)
- dev_hold(rt->u.dst.dev);
+ rt->dst.input = ort->dst.input;
+ rt->dst.output = ort->dst.output;
+
+ memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+ rt->dst.error = ort->dst.error;
+ rt->dst.dev = ort->dst.dev;
+ if (rt->dst.dev)
+ dev_hold(rt->dst.dev);
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
- rt->u.dst.lastuse = jiffies;
+ rt->dst.lastuse = jiffies;
rt->rt6i_expires = 0;
ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
@@ -1689,14 +1686,14 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
if (!fn)
goto out;
- for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (rt->rt6i_dev->ifindex != ifindex)
continue;
if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
continue;
if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
continue;
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
break;
}
out:
@@ -1744,14 +1741,14 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
return NULL;
write_lock_bh(&table->tb6_lock);
- for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
+ for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
if (dev == rt->rt6i_dev &&
((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&rt->rt6i_gateway, addr))
break;
}
if (rt)
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
write_unlock_bh(&table->tb6_lock);
return rt;
}
@@ -1790,9 +1787,9 @@ void rt6_purge_dflt_routers(struct net *net)
restart:
read_lock_bh(&table->tb6_lock);
- for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
+ for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
- dst_hold(&rt->u.dst);
+ dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
ip6_del_rt(rt);
goto restart;
@@ -1930,15 +1927,15 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
dev_hold(net->loopback_dev);
in6_dev_hold(idev);
- rt->u.dst.flags = DST_HOST;
- rt->u.dst.input = ip6_input;
- rt->u.dst.output = ip6_output;
+ rt->dst.flags = DST_HOST;
+ rt->dst.input = ip6_input;
+ rt->dst.output = ip6_output;
rt->rt6i_dev = net->loopback_dev;
rt->rt6i_idev = idev;
- rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
- rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
- rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
- rt->u.dst.obsolete = -1;
+ rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+ rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+ rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+ rt->dst.obsolete = -1;
rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
if (anycast)
@@ -1947,7 +1944,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->rt6i_flags |= RTF_LOCAL;
neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
if (IS_ERR(neigh)) {
- dst_free(&rt->u.dst);
+ dst_free(&rt->dst);
/* We are casting this because that is the return
* value type. But an errno encoded pointer is the
@@ -1962,7 +1959,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
- atomic_set(&rt->u.dst.__refcnt, 1);
+ atomic_set(&rt->dst.__refcnt, 1);
return rt;
}
@@ -2033,12 +2030,12 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
PMTU discovery.
*/
if (rt->rt6i_dev == arg->dev &&
- !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
- (dst_mtu(&rt->u.dst) >= arg->mtu ||
- (dst_mtu(&rt->u.dst) < arg->mtu &&
- dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
- rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
- rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
+ !dst_metric_locked(&rt->dst, RTAX_MTU) &&
+ (dst_mtu(&rt->dst) >= arg->mtu ||
+ (dst_mtu(&rt->dst) < arg->mtu &&
+ dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
+ rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
+ rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
}
return 0;
}
@@ -2252,20 +2249,20 @@ static int rt6_fill_node(struct net *net,
#endif
NLA_PUT_U32(skb, RTA_IIF, iif);
} else if (dst) {
- struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
+ struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
struct in6_addr saddr_buf;
if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
dst, 0, &saddr_buf) == 0)
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
- if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
+ if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
goto nla_put_failure;
- if (rt->u.dst.neighbour)
- NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
+ if (rt->dst.neighbour)
+ NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
- if (rt->u.dst.dev)
+ if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
@@ -2277,8 +2274,8 @@ static int rt6_fill_node(struct net *net,
else
expires = INT_MAX;
- if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
- expires, rt->u.dst.error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
+ expires, rt->dst.error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -2364,7 +2361,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
@@ -2416,12 +2413,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
struct net *net = dev_net(dev);
if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
- net->ipv6.ip6_null_entry->u.dst.dev = dev;
+ net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
+ net->ipv6.ip6_prohibit_entry->dst.dev = dev;
net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
- net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
+ net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
}
@@ -2464,8 +2461,8 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
seq_puts(m, "00000000000000000000000000000000");
}
seq_printf(m, " %08x %08x %08x %08x %8s\n",
- rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
- rt->u.dst.__use, rt->rt6i_flags,
+ rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+ rt->dst.__use, rt->rt6i_flags,
rt->rt6i_dev ? rt->rt6i_dev->name : "");
return 0;
}
@@ -2646,9 +2643,9 @@ static int __net_init ip6_route_net_init(struct net *net)
GFP_KERNEL);
if (!net->ipv6.ip6_null_entry)
goto out_ip6_dst_ops;
- net->ipv6.ip6_null_entry->u.dst.path =
+ net->ipv6.ip6_null_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
- net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2656,18 +2653,18 @@ static int __net_init ip6_route_net_init(struct net *net)
GFP_KERNEL);
if (!net->ipv6.ip6_prohibit_entry)
goto out_ip6_null_entry;
- net->ipv6.ip6_prohibit_entry->u.dst.path =
+ net->ipv6.ip6_prohibit_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
- net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
GFP_KERNEL);
if (!net->ipv6.ip6_blk_hole_entry)
goto out_ip6_prohibit_entry;
- net->ipv6.ip6_blk_hole_entry->u.dst.path =
+ net->ipv6.ip6_blk_hole_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
- net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
#endif
net->ipv6.sysctl.flush_delay = 0;
@@ -2742,12 +2739,12 @@ int __init ip6_route_init(void)
/* Registering of the loopback is done before this portion of code,
* the loopback reference in rt6_info will not be taken, do it
* manually for init_net */
- init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
ret = fib6_init();
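The route.c hunks above are a mechanical rename: struct rt6_info used to wrap its dst_entry in a one-member union (accessed as rt->u.dst) and now embeds it directly as rt->dst, as the template initializers at the top of the file show. The sketch below is a pared-down userspace illustration, not kernel code; the struct names echo the kernel ones but the fields are invented, and it only demonstrates that the embedded dst stays at offset 0, so existing casts between route and dst_entry pointers keep working.

#include <stdio.h>
#include <stddef.h>

struct dst_entry { int __refcnt; int error; };  /* stand-in fields only */

/* old layout: dst wrapped inside a union member named "u" */
struct rt6_info_old {
        union { struct dst_entry dst; } u;
        unsigned int rt6i_flags;
};

/* new layout: dst embedded directly as the first member */
struct rt6_info_new {
        struct dst_entry dst;
        unsigned int rt6i_flags;
};

int main(void)
{
        /* the embedded dst sits at offset 0 either way, so casting a
         * route pointer to a dst_entry pointer keeps working; only the
         * accessor spelling changes from rt->u.dst to rt->dst */
        printf("old offset: %zu, new offset: %zu\n",
               offsetof(struct rt6_info_old, u.dst),
               offsetof(struct rt6_info_new, dst));
        return 0;
}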
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e51e650ea80b..4699cd3c3118 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -249,8 +249,6 @@ failed:
return NULL;
}
-static DEFINE_SPINLOCK(ipip6_prl_lock);
-
#define for_each_prl_rcu(start) \
for (prl = rcu_dereference(start); \
prl; \
@@ -340,7 +338,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
if (a->addr == htonl(INADDR_ANY))
return -EINVAL;
- spin_lock(&ipip6_prl_lock);
+ ASSERT_RTNL();
for (p = t->prl; p; p = p->next) {
if (p->addr == a->addr) {
@@ -370,7 +368,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
t->prl_count++;
rcu_assign_pointer(t->prl, p);
out:
- spin_unlock(&ipip6_prl_lock);
return err;
}
@@ -397,7 +394,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
struct ip_tunnel_prl_entry *x, **p;
int err = 0;
- spin_lock(&ipip6_prl_lock);
+ ASSERT_RTNL();
if (a && a->addr != htonl(INADDR_ANY)) {
for (p = &t->prl; *p; p = &(*p)->next) {
@@ -419,7 +416,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
}
}
out:
- spin_unlock(&ipip6_prl_lock);
return err;
}
@@ -716,7 +712,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
stats->tx_carrier_errors++;
goto tx_error_icmp;
}
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
@@ -725,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (df) {
- mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+ mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
stats->collisions++;
@@ -784,7 +780,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags = 0;
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/*
* Push down and install the IPIP header.
@@ -833,7 +829,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
.proto = IPPROTO_IPV6 };
struct rtable *rt;
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
ip_rt_put(rt);
}
dev->flags |= IFF_POINTOPOINT;
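The sit.c change above drops the per-file ipip6_prl_lock because the PRL add/delete paths only run from configuration context that already holds the RTNL lock, so a debug assertion (ASSERT_RTNL) is enough for writers while readers stay on RCU (for_each_prl_rcu / rcu_assign_pointer). The userspace sketch below illustrates the lock-to-assertion pattern with invented names; the flag-based assert_cfg_lock() is a stand-in for ASSERT_RTNL(), not how the kernel implements it.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for RTNL */
static bool cfg_lock_held;                                   /* bookkeeping for the demo */

static void assert_cfg_lock(void)       /* stands in for ASSERT_RTNL() */
{
        assert(cfg_lock_held);
}

static int prl_count;

static void prl_add(void)
{
        assert_cfg_lock();      /* writers are already serialized by the caller */
        prl_count++;            /* so no private spinlock is needed here        */
}

int main(void)
{
        pthread_mutex_lock(&cfg_lock);
        cfg_lock_held = true;
        prl_add();
        cfg_lock_held = false;
        pthread_mutex_unlock(&cfg_lock);

        printf("prl_count = %d\n", prl_count);
        return 0;
}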
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 34d1f0690d7e..c7ee57421ece 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -27,28 +27,17 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-/*
- * This table has to be sorted and terminated with (__u16)-1.
- * XXX generate a better table.
- * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
- *
- * Taken directly from ipv4 implementation.
- * Should this list be modified for ipv6 use or is it close enough?
- * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart
- */
+/* Table must be sorted. */
static __u16 const msstab[] = {
- 64 - 1,
- 256 - 1,
- 512 - 1,
- 536 - 1,
- 1024 - 1,
- 1440 - 1,
- 1460 - 1,
- 4312 - 1,
- (__u16)-1
+ 64,
+ 512,
+ 536,
+ 1280 - 60,
+ 1480 - 60,
+ 1500 - 60,
+ 4460 - 60,
+ 9000 - 60,
};
-/* The number doesn't include the -1 terminator */
-#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
/*
* This (misnamed) value is the age of syncookie which is permitted.
@@ -134,9 +123,11 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
tcp_synq_overflow(sk);
- for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
- ;
- *mssp = msstab[mssind] + 1;
+ for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
+ if (mss >= msstab[mssind])
+ break;
+
+ *mssp = msstab[mssind];
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
@@ -154,7 +145,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
th->source, th->dest, seq,
jiffies / (HZ * 60), COUNTER_TRIES);
- return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
+ return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
@@ -174,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst;
__u8 rcv_wscale;
- if (!sysctl_tcp_syncookies || !th->ack)
+ if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
@@ -189,8 +180,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
+ if (!cookie_check_timestamp(&tcp_opt))
+ goto out;
ret = NULL;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
@@ -240,17 +231,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
* me if there is a preferred way.
*/
{
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
- if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, np->opt, &final);
ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
fl.oif = sk->sk_bound_dev_if;
fl.mark = sk->sk_mark;
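The syncookies hunks replace the old value-minus-one MSS table with plain MSS values and a reversed scan: the encoder walks the table from the largest entry down and picks the biggest value not exceeding the peer's MSS (falling back to msstab[0]), and the decoder simply bounds-checks the recovered index. The short userspace program below reuses the table values from the hunk to show the lookup; the mss_to_index() wrapper and the sample inputs are illustrative only.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* values copied from the hunk above */
static const unsigned int msstab[] = {
        64, 512, 536, 1280 - 60, 1480 - 60, 1500 - 60, 4460 - 60, 9000 - 60,
};

/* encode step: largest table entry that does not exceed the peer's MSS,
 * falling back to msstab[0] when nothing fits */
static unsigned int mss_to_index(unsigned int mss)
{
        unsigned int i;

        for (i = ARRAY_SIZE(msstab) - 1; i; i--)
                if (mss >= msstab[i])
                        break;
        return i;
}

int main(void)
{
        const unsigned int samples[] = { 400, 536, 1400, 1460, 9000 };
        unsigned int k;

        for (k = 0; k < ARRAY_SIZE(samples); k++)
                printf("mss %4u -> index %u, advertise %u\n",
                       samples[k], mss_to_index(samples[k]),
                       msstab[mss_to_index(samples[k])]);
        return 0;
}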
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2b7c3a100e2c..f87534569366 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -129,7 +129,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct inet_connection_sock *icsk = inet_csk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct in6_addr *saddr = NULL, *final_p = NULL, final;
+ struct in6_addr *saddr = NULL, *final_p, final;
struct flowi fl;
struct dst_entry *dst;
int addr_type;
@@ -250,12 +250,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl.fl_ip_dport = usin->sin6_port;
fl.fl_ip_sport = inet->inet_sport;
- if (np->opt && np->opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, np->opt, &final);
security_sk_classify_flow(sk, &fl);
@@ -477,7 +472,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff * skb;
struct ipv6_txoptions *opt = NULL;
- struct in6_addr * final_p = NULL, final;
+ struct in6_addr * final_p, final;
struct flowi fl;
struct dst_entry *dst;
int err = -1;
@@ -494,12 +489,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
security_req_classify_flow(req, &fl);
opt = np->opt;
- if (opt && opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, opt, &final);
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
@@ -1167,7 +1157,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
}
#ifdef CONFIG_SYN_COOKIES
- if (!th->rst && !th->syn && th->ack)
+ if (!th->syn)
sk = cookie_v6_check(sk, skb);
#endif
return sk;
@@ -1282,10 +1272,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));
- if (want_cookie) {
- isn = cookie_v6_init_sequence(sk, skb, &req->mss);
- req->cookie_ts = tmp_opt.tstamp_ok;
- } else if (!isn) {
+ if (!isn) {
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1298,8 +1285,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
treq->iif = inet6_iif(skb);
-
- isn = tcp_v6_init_sequence(skb);
+ if (!want_cookie) {
+ isn = tcp_v6_init_sequence(skb);
+ } else {
+ isn = cookie_v6_init_sequence(sk, skb, &req->mss);
+ req->cookie_ts = tmp_opt.tstamp_ok;
+ }
}
tcp_rsk(req)->snt_isn = isn;
@@ -1392,18 +1383,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
goto out_overflow;
if (dst == NULL) {
- struct in6_addr *final_p = NULL, final;
+ struct in6_addr *final_p, final;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
fl.proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
- if (opt && opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
- }
+ final_p = fl6_update_dst(&fl, opt, &final);
ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
fl.oif = sk->sk_bound_dev_if;
fl.mark = sk->sk_mark;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 87be58673b55..1dd1affdead2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -927,7 +927,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
- struct in6_addr *daddr, *final_p = NULL, final;
+ struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi fl;
@@ -1097,14 +1097,9 @@ do_udp_sendmsg:
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl_ip_sport = inet->inet_sport;
- /* merge ip6_build_xmit from ip6_output */
- if (opt && opt->srcrt) {
- struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
- ipv6_addr_copy(&final, &fl.fl6_dst);
- ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
- final_p = &final;
+ final_p = fl6_update_dst(&fl, opt, &final);
+ if (final_p)
connected = 0;
- }
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
fl.oif = np->mcast_oif;
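The syncookies, tcp_ipv6 and udp hunks all delete the same open-coded type-0 routing-header handling and call a shared fl6_update_dst() helper instead. The sketch below is a hedged reconstruction of what such a helper does, inferred from the code being removed: stash the real destination in *final, point the flow at the first routing-header hop, and return a non-NULL pointer so callers (for example udpv6_sendmsg, which then clears `connected`) know the destination was rewritten. All types here are pared-down userspace stand-ins, not the kernel definitions.

#include <stdio.h>
#include <string.h>

struct in6_addr { unsigned char s6_addr[16]; };
struct rt0_hdr { struct in6_addr addr; };          /* first routing-header hop only */
struct ipv6_txoptions { struct rt0_hdr *srcrt; };
struct flowi { struct in6_addr fl6_dst; };

static struct in6_addr *fl6_update_dst(struct flowi *fl,
                                       const struct ipv6_txoptions *opt,
                                       struct in6_addr *orig)
{
        if (!opt || !opt->srcrt)
                return NULL;                    /* no routing header: flow untouched */

        *orig = fl->fl6_dst;                    /* remember the real destination       */
        fl->fl6_dst = opt->srcrt->addr;         /* route via the first listed hop      */
        return orig;                            /* non-NULL: destination was rewritten */
}

int main(void)
{
        struct ipv6_txoptions opt = { .srcrt = NULL };
        struct flowi fl;
        struct in6_addr final;

        memset(&fl, 0, sizeof(fl));
        printf("final_p without a routing header: %p\n",
               (void *)fl6_update_dst(&fl, &opt, &final));
        return 0;
}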
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 47db1d8a0d92..285761e77d90 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1853,23 +1853,23 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
self->remote_credit);
seq_printf(seq, "send credit: %d\n",
self->send_credit);
- seq_printf(seq, " tx packets: %ld, ",
+ seq_printf(seq, " tx packets: %lu, ",
self->stats.tx_packets);
- seq_printf(seq, "rx packets: %ld, ",
+ seq_printf(seq, "rx packets: %lu, ",
self->stats.rx_packets);
- seq_printf(seq, "tx_queue len: %d ",
+ seq_printf(seq, "tx_queue len: %u ",
skb_queue_len(&self->tx_queue));
- seq_printf(seq, "rx_queue len: %d\n",
+ seq_printf(seq, "rx_queue len: %u\n",
skb_queue_len(&self->rx_queue));
seq_printf(seq, " tx_sdu_busy: %s, ",
self->tx_sdu_busy? "TRUE":"FALSE");
seq_printf(seq, "rx_sdu_busy: %s\n",
self->rx_sdu_busy? "TRUE":"FALSE");
- seq_printf(seq, " max_seg_size: %d, ",
+ seq_printf(seq, " max_seg_size: %u, ",
self->max_seg_size);
- seq_printf(seq, "tx_max_sdu_size: %d, ",
+ seq_printf(seq, "tx_max_sdu_size: %u, ",
self->tx_max_sdu_size);
- seq_printf(seq, "rx_max_sdu_size: %d\n",
+ seq_printf(seq, "rx_max_sdu_size: %u\n",
self->rx_max_sdu_size);
seq_printf(seq, " Used by (%s)\n\n",
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f28ad2cc8428..499c045d6910 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1463,7 +1463,7 @@ struct iucv_path_pending {
u32 res3;
u8 ippollfg;
u8 res4[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_path_pending(struct iucv_irq_data *data)
{
@@ -1524,7 +1524,7 @@ struct iucv_path_complete {
u32 res3;
u8 ippollfg;
u8 res4[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_path_complete(struct iucv_irq_data *data)
{
@@ -1554,7 +1554,7 @@ struct iucv_path_severed {
u32 res4;
u8 ippollfg;
u8 res5[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_path_severed(struct iucv_irq_data *data)
{
@@ -1590,7 +1590,7 @@ struct iucv_path_quiesced {
u32 res4;
u8 ippollfg;
u8 res5[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
@@ -1618,7 +1618,7 @@ struct iucv_path_resumed {
u32 res4;
u8 ippollfg;
u8 res5[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_path_resumed(struct iucv_irq_data *data)
{
@@ -1649,7 +1649,7 @@ struct iucv_message_complete {
u32 ipbfln2f;
u8 ippollfg;
u8 res2[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_message_complete(struct iucv_irq_data *data)
{
@@ -1694,7 +1694,7 @@ struct iucv_message_pending {
u32 ipbfln2f;
u8 ippollfg;
u8 res2[3];
-} __attribute__ ((packed));
+} __packed;
static void iucv_message_pending(struct iucv_irq_data *data)
{
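The iucv hunks only swap the long-hand GCC attribute for the kernel's __packed shorthand; in the kernel headers that macro expands to exactly the attribute being replaced. The snippet below restates that expansion and shows the layout effect of packing; the struct names are invented for the demo.

#include <stdio.h>

/* same expansion the kernel's compiler-gcc.h provides */
#define __packed __attribute__((packed))

struct demo_padded { unsigned char a; unsigned int b; };
struct demo_packed { unsigned char a; unsigned int b; } __packed;

int main(void)
{
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct demo_padded), sizeof(struct demo_packed));
        return 0;
}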
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0852512d392c..226a0ae3bcfd 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -348,7 +348,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
- sk_dst_set(sk, &rt->u.dst);
+ sk_dst_set(sk, &rt->dst);
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
@@ -496,9 +496,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
goto no_route;
}
- sk_setup_caps(sk, &rt->u.dst);
+ sk_setup_caps(sk, &rt->dst);
}
- skb_dst_set(skb, dst_clone(&rt->u.dst));
+ skb_dst_set(skb, dst_clone(&rt->dst));
/* Queue the packet to IP for output */
rc = ip_queue_xmit(skb);
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8a91f6c0bb18..83eec7a8bd1f 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL
---help---
This option enables the 'minstrel' TX rate control algorithm
+config MAC80211_RC_MINSTREL_HT
+ bool "Minstrel 802.11n support" if EMBEDDED
+ depends on MAC80211_RC_MINSTREL
+ default y
+ ---help---
+ This option enables the 'minstrel_ht' TX rate control algorithm
+
choice
prompt "Default rate control algorithm"
depends on MAC80211_HAS_RC
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 84b48ba8a77e..fdb54e61d637 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -51,7 +51,11 @@ rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
rc80211_minstrel-y := rc80211_minstrel.o
rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
+rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
+rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
+
mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
+mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a94960..965b272499fd 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+/**
+ * DOC: RX A-MPDU aggregation
+ *
+ * Aggregation on the RX side requires only implementing the
+ * @ampdu_action callback that is invoked to start/stop any
+ * block-ack sessions for RX aggregation.
+ *
+ * When RX aggregation is started by the peer, the driver is
+ * notified via @ampdu_action function, with the
+ * %IEEE80211_AMPDU_RX_START action, and may reject the request
+ * in which case a negative response is sent to the peer, if it
+ * accepts it a positive response is sent.
+ *
+ * While the session is active, the device/driver are required
+ * to de-aggregate frames and pass them up one by one to mac80211,
+ * which will handle the reorder buffer.
+ *
+ * When the aggregation session is stopped again by the peer or
+ * ourselves, the driver's @ampdu_action function will be called
+ * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the
+ * call must not fail.
+ */
+
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
-static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
- u16 initiator, u16 reason,
- bool from_timer)
+static void ieee80211_free_tid_rx(struct rcu_head *h)
{
- struct ieee80211_local *local = sta->local;
- struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_rx *tid_rx =
+ container_of(h, struct tid_ampdu_rx, rcu_head);
int i;
- spin_lock_bh(&sta->lock);
+ for (i = 0; i < tid_rx->buf_size; i++)
+ dev_kfree_skb(tid_rx->reorder_buf[i]);
+ kfree(tid_rx->reorder_buf);
+ kfree(tid_rx->reorder_time);
+ kfree(tid_rx);
+}
- /* check if TID is in operational state */
- if (!sta->ampdu_mlme.tid_active_rx[tid]) {
- spin_unlock_bh(&sta->lock);
- return;
- }
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason)
+{
+ struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_rx *tid_rx;
- sta->ampdu_mlme.tid_active_rx[tid] = false;
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_rx = sta->ampdu_mlme.tid_rx[tid];
+ if (!tid_rx)
+ return;
+
+ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
@@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
ieee80211_send_delba(sta->sdata, sta->sta.addr,
tid, 0, reason);
- /* free the reordering buffer */
- for (i = 0; i < tid_rx->buf_size; i++) {
- if (tid_rx->reorder_buf[i]) {
- /* release the reordered frames */
- dev_kfree_skb(tid_rx->reorder_buf[i]);
- tid_rx->stored_mpdu_num--;
- tid_rx->reorder_buf[i] = NULL;
- }
- }
-
- /* free resources */
- kfree(tid_rx->reorder_buf);
- kfree(tid_rx->reorder_time);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
-
- spin_unlock_bh(&sta->lock);
+ del_timer_sync(&tid_rx->session_timer);
- if (!from_timer)
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
+ call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
}
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason)
{
- ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false);
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
}
/*
@@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
- ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
- WLAN_REASON_QSTA_TIMEOUT, true);
+ set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+ ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* examine state machine */
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
- if (sta->ampdu_mlme.tid_active_rx[tid]) {
+ if (sta->ampdu_mlme.tid_rx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
/* prepare A-MPDU MLME for Rx aggregation */
- sta->ampdu_mlme.tid_rx[tid] =
- kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
- if (!sta->ampdu_mlme.tid_rx[tid]) {
+ tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+ if (!tid_agg_rx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
@@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
#endif
goto end;
}
- /* rx timer */
- sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
- sta_rx_agg_session_timer_expired;
- sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
- (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
- tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+ /* rx timer */
+ tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
+ tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_agg_rx->session_timer);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
@@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
#endif
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
- kfree(sta->ampdu_mlme.tid_rx[tid]);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
+ kfree(tid_agg_rx);
goto end;
}
@@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (ret) {
kfree(tid_agg_rx->reorder_buf);
+ kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
- sta->ampdu_mlme.tid_rx[tid] = NULL;
goto end;
}
- /* change state and send addba resp */
- sta->ampdu_mlme.tid_active_rx[tid] = true;
+ /* update data */
tid_agg_rx->dialog_token = dialog_token;
tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num;
@@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
status = WLAN_STATUS_SUCCESS;
+
+ /* activate it for RX */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+
+ if (timeout)
+ mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+
end:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock:
ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
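The agg-rx.c rework above replaces the spinlock-protected teardown with an RCU-friendly sequence: unpublish tid_rx with rcu_assign_pointer(..., NULL), stop the session timer, then defer the actual free to call_rcu() so concurrent readers under rcu_read_lock() can drain first. The userspace sketch below mimics that ordering with stand-in rcu_head/call_rcu definitions that invoke the callback immediately; it is a shape-of-the-pattern demo, not the kernel's RCU.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct rcu_head { void (*func)(struct rcu_head *); };

/* stand-in: real RCU would defer this until all current readers finish */
static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
        func(head);
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tid_ampdu_rx {
        int buf_size;
        struct rcu_head rcu_head;
};

static struct tid_ampdu_rx *tid_rx_slot;        /* the published pointer */

static void free_tid_rx(struct rcu_head *h)
{
        struct tid_ampdu_rx *tid_rx =
                container_of(h, struct tid_ampdu_rx, rcu_head);

        printf("freeing session, buf_size %d\n", tid_rx->buf_size);
        free(tid_rx);
}

int main(void)
{
        struct tid_ampdu_rx *tid_rx;

        tid_rx_slot = calloc(1, sizeof(*tid_rx_slot));
        if (!tid_rx_slot)
                return 1;
        tid_rx_slot->buf_size = 64;

        tid_rx = tid_rx_slot;
        tid_rx_slot = NULL;     /* 1. unpublish (rcu_assign_pointer(..., NULL))    */
                                /* 2. del_timer_sync() would go here in the kernel */
        call_rcu(&tid_rx->rcu_head, free_tid_rx);   /* 3. free once readers drain  */
        return 0;
}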
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 98258b7341e3..c893f236acea 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2009, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,28 +21,39 @@
#include "wme.h"
/**
- * DOC: TX aggregation
+ * DOC: TX A-MPDU aggregation
*
* Aggregation on the TX side requires setting the hardware flag
- * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues
- * hardware parameter to the number of hardware AMPDU queues. If there are no
- * hardware queues then the driver will (currently) have to do all frame
- * buffering.
+ * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
+ * packets with a flag indicating A-MPDU aggregation. The driver
+ * or device is responsible for actually aggregating the frames,
+ * as well as deciding how many and which to aggregate.
*
- * When TX aggregation is started by some subsystem (usually the rate control
- * algorithm would be appropriate) by calling the
- * ieee80211_start_tx_ba_session() function, the driver will be notified via
- * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action.
+ * When TX aggregation is started by some subsystem (usually the rate
+ * control algorithm would be appropriate) by calling the
+ * ieee80211_start_tx_ba_session() function, the driver will be
+ * notified via its @ampdu_action function, with the
+ * %IEEE80211_AMPDU_TX_START action.
*
* In response to that, the driver is later required to call the
- * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe())
- * function, which will start the aggregation session.
+ * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
+ * start the aggregation session after the peer has also responded.
+ * If the peer responds negatively, the session will be stopped
+ * again right away. Note that it is possible for the aggregation
+ * session to be stopped before the driver has indicated that it
+ * is done setting it up, in which case it must not indicate the
+ * setup completion.
*
- * Similarly, when the aggregation session is stopped by
- * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will
- * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the
- * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb()
- * (or ieee80211_stop_tx_ba_cb_irqsafe()).
+ * Also note that, since we also need to wait for a response from
+ * the peer, the driver is notified of the completion of the
+ * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
+ * @ampdu_action callback.
+ *
+ * Similarly, when the aggregation session is stopped by the peer
+ * or something calling ieee80211_stop_tx_ba_session(), the driver's
+ * @ampdu_action function will be called with the action
+ * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
+ * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
*/
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
@@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
ieee80211_tx_skb(sdata, skb);
}
+static void kfree_tid_tx(struct rcu_head *rcu_head)
+{
+ struct tid_ampdu_tx *tid_tx =
+ container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
+
+ kfree(tid_tx);
+}
+
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator)
{
struct ieee80211_local *local = sta->local;
+ struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
int ret;
- u8 *state;
+
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+ if (!tid_tx)
+ return -ENOENT;
+
+ spin_lock_bh(&sta->lock);
+
+ if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+ /* not even started yet! */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+ spin_unlock_bh(&sta->lock);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+ return 0;
+ }
+
+ spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
- if (*state == HT_AGG_STATE_OPERATIONAL)
- sta->ampdu_mlme.addba_req_num[tid] = 0;
+ /*
+ * After this packets are no longer handed right through
+ * to the driver but are put onto tid_tx->pending instead,
+ * with locking to ensure proper access.
+ */
+ clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
- *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
- (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+ tid_tx->stop_initiator = initiator;
ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
@@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
u16 tid = *(u8 *)data;
struct sta_info *sta = container_of((void *)data,
struct sta_info, timer_to_tid[tid]);
- u8 *state;
-
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ struct tid_ampdu_tx *tid_tx;
/* check if the TID waits for addBA response */
- spin_lock_bh(&sta->lock);
- if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
- HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
- HT_ADDBA_REQUESTED_MSK) {
- spin_unlock_bh(&sta->lock);
+ rcu_read_lock();
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx ||
+ test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
+ rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
"(or no longer) expecting addBA response there\n",
@@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
#endif
- ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
- spin_unlock_bh(&sta->lock);
+ ieee80211_stop_tx_ba_session(&sta->sta, tid);
+ rcu_read_unlock();
}
static inline int ieee80211_ac_from_tid(int tid)
@@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid)
return ieee802_1d_to_ac[tid & 7];
}
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+
+ if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+ ieee80211_stop_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+
+ if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+ ieee80211_wake_queue_by_reason(
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __release(agg_queue);
+}
+
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
+{
+ struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u16 start_seq_num;
+ int ret;
+
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
+ /*
+ * While we're asking the driver about the aggregation,
+ * stop the AC queue so that we don't have to worry
+ * about frames that came in while we were doing that,
+ * which would require us to put them to the AC pending
+ * afterwards which just makes the code more complex.
+ */
+ ieee80211_stop_queue_agg(local, tid);
+
+ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
+
+ /*
+ * make sure no packets are being processed to get
+ * valid starting sequence number
+ */
+ synchronize_net();
+
+ start_seq_num = sta->tid_seq[tid] >> 4;
+
+ ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
+ &sta->sta, tid, &start_seq_num);
+ if (ret) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "BA request denied - HW unavailable for"
+ " tid %d\n", tid);
+#endif
+ spin_lock_bh(&sta->lock);
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+ spin_unlock_bh(&sta->lock);
+
+ ieee80211_wake_queue_agg(local, tid);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+ return;
+ }
+
+ /* we can take packets again now */
+ ieee80211_wake_queue_agg(local, tid);
+
+ /* activate the timer for the recipient's addBA response */
+ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+
+ spin_lock_bh(&sta->lock);
+ sta->ampdu_mlme.addba_req_num[tid]++;
+ spin_unlock_bh(&sta->lock);
+
+ /* send AddBA request */
+ ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
+ tid_tx->dialog_token, start_seq_num,
+ 0x40, 5000);
+}
+
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
int ret = 0;
- u16 start_seq_num;
trace_api_start_tx_ba_session(pubsta, tid);
@@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
- if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Disassociation is in progress. "
- "Denying BA session request\n");
-#endif
- return -EINVAL;
- }
-
if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Suspend in progress. "
+ printk(KERN_DEBUG "BA sessions blocked. "
"Denying BA session request\n");
#endif
return -EINVAL;
}
spin_lock_bh(&sta->lock);
- spin_lock(&local->ampdu_lock);
/* we have tried too many times, receiver does not want A-MPDU */
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
@@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
goto err_unlock_sta;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
/* check if the TID is not in aggregation flow already */
- if (*state != HT_AGG_STATE_IDLE) {
+ if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - session is not "
"idle on tid %u\n", tid);
@@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
goto err_unlock_sta;
}
- /*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
- */
- ieee80211_stop_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
/* prepare A-MPDU MLME for Tx aggregation */
- sta->ampdu_mlme.tid_tx[tid] =
- kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
- if (!sta->ampdu_mlme.tid_tx[tid]) {
+ tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+ if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
tid);
#endif
ret = -ENOMEM;
- goto err_wake_queue;
+ goto err_unlock_sta;
}
- skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
+ skb_queue_head_init(&tid_tx->pending);
+ __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/* Tx timer */
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
- sta_addba_resp_timer_expired;
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
- (unsigned long)&sta->timer_to_tid[tid];
- init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
- /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
- * call back right away, it must see that the flow has begun */
- *state |= HT_ADDBA_REQUESTED_MSK;
-
- start_seq_num = sta->tid_seq[tid] >> 4;
-
- ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- pubsta, tid, &start_seq_num);
+ tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
+ tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+ init_timer(&tid_tx->addba_resp_timer);
- if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - HW unavailable for"
- " tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
- *state = HT_AGG_STATE_IDLE;
- goto err_free;
- }
-
- /* Driver vetoed or OKed, but we can take packets again now */
- ieee80211_wake_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-
- spin_unlock(&local->ampdu_lock);
-
- /* prepare tid data */
+ /* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
- sta->ampdu_mlme.tid_tx[tid]->dialog_token =
- sta->ampdu_mlme.dialog_token_allocator;
- sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+ tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
- spin_unlock_bh(&sta->lock);
+ /* finally, assign it to the array */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
- /* send AddBA request */
- ieee80211_send_addba_request(sdata, pubsta->addr, tid,
- sta->ampdu_mlme.tid_tx[tid]->dialog_token,
- sta->ampdu_mlme.tid_tx[tid]->ssn,
- 0x40, 5000);
- sta->ampdu_mlme.addba_req_num[tid]++;
- /* activate the timer for the recipient's addBA response */
- sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
- jiffies + ADDBA_RESP_INTERVAL;
- add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
- return 0;
-
- err_free:
- kfree(sta->ampdu_mlme.tid_tx[tid]);
- sta->ampdu_mlme.tid_tx[tid] = NULL;
- err_wake_queue:
- ieee80211_wake_queue_by_reason(
- &local->hw, ieee80211_ac_from_tid(tid),
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ /* this flow continues off the work */
err_unlock_sta:
- spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
return ret;
}
@@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
/*
* splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish and holding
- * local->ampdu_lock across both calls.
+ * requires a call to ieee80211_agg_splice_finish later
*/
-static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct sta_info *sta, u16 tid)
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
{
+ int queue = ieee80211_ac_from_tid(tid);
unsigned long flags;
- u16 queue = ieee80211_ac_from_tid(tid);
-
- ieee80211_stop_queue_by_reason(
- &local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
- return;
+ ieee80211_stop_queue_agg(local, tid);
- if (WARN(!sta->ampdu_mlme.tid_tx[tid],
- "TID %d gone but expected when splicing aggregates from"
- "the pending queue\n", tid))
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
return;
- if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+ if (!skb_queue_empty(&tid_tx->pending)) {
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
/* copy over remaining packets */
- skb_queue_splice_tail_init(
- &sta->ampdu_mlme.tid_tx[tid]->pending,
- &local->pending[queue]);
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
}
-static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
- struct sta_info *sta, u16 tid)
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
{
- u16 queue = ieee80211_ac_from_tid(tid);
-
- ieee80211_wake_queue_by_reason(
- &local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ ieee80211_wake_queue_agg(local, tid);
}
-/* caller must hold sta->lock */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif
- spin_lock(&local->ampdu_lock);
- ieee80211_agg_splice_packets(local, sta, tid);
- /*
- * NB: we rely on sta->lock being taken in the TX
- * processing here when adding to the pending queue,
- * otherwise we could only change the state of the
- * session to OPERATIONAL _here_.
- */
- ieee80211_agg_splice_finish(local, sta, tid);
- spin_unlock(&local->ampdu_lock);
-
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
&sta->sta, tid, NULL);
+
+ /*
+ * synchronize with TX path, while splicing the TX path
+ * should block so it won't put more packets onto pending.
+ */
+ spin_lock_bh(&sta->lock);
+
+ ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
+ /*
+ * Now mark as operational. This will be visible
+ * in the TX path, and lets it go lock-free in
+ * the common case.
+ */
+ set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
+ ieee80211_agg_splice_finish(local, tid);
+
+ spin_unlock_bh(&sta->lock);
}
void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
@@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
trace_api_start_tx_ba_cb(sdata, ra, tid);
@@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
return;
}
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, ra);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
return;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
- if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) {
+ if (WARN_ON(!tid_tx)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
- *state);
+ printk(KERN_DEBUG "addBA was not requested!\n");
#endif
- spin_unlock_bh(&sta->lock);
- rcu_read_unlock();
- return;
+ goto unlock;
}
- if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK))
- goto out;
-
- *state |= HT_ADDBA_DRV_READY_MSK;
+ if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+ goto unlock;
- if (*state == HT_AGG_STATE_OPERATIONAL)
+ if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
- out:
- spin_unlock_bh(&sta->lock);
- rcu_read_unlock();
+ unlock:
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+ mutex_unlock(&local->sta_mtx);
}
-EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
@@ -510,44 +567,36 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
- ra_tid->vif = vif;
- skb->pkt_type = IEEE80211_ADDBA_MSG;
- skb_queue_tail(&local->skb_queue, skb);
- tasklet_schedule(&local->tasklet);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator)
{
- u8 *state;
int ret;
- /* check if the TID is in aggregation */
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- spin_lock_bh(&sta->lock);
-
- if (*state != HT_AGG_STATE_OPERATIONAL) {
- ret = -ENOENT;
- goto unlock;
- }
+ mutex_lock(&sta->ampdu_mlme.mtx);
ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
- unlock:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+
return ret;
}
-int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
- enum ieee80211_back_parties initiator)
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ struct tid_ampdu_tx *tid_tx;
+ int ret = 0;
- trace_api_stop_tx_ba_session(pubsta, tid, initiator);
+ trace_api_stop_tx_ba_session(pubsta, tid);
if (!local->ops->ampdu_action)
return -EINVAL;
@@ -555,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
if (tid >= STA_TID_NUM)
return -EINVAL;
- return __ieee80211_stop_tx_ba_session(sta, tid, initiator);
+ spin_lock_bh(&sta->lock);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ /* already in progress stopping it */
+ ret = 0;
+ goto unlock;
+ }
+
+ set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
+ ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+
+ unlock:
+ spin_unlock_bh(&sta->lock);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
@@ -564,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- u8 *state;
+ struct tid_ampdu_tx *tid_tx;
trace_api_stop_tx_ba_cb(sdata, ra, tid);
@@ -581,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
ra, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
+
sta = sta_info_get(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
#endif
- rcu_read_unlock();
- return;
+ goto unlock;
}
- state = &sta->ampdu_mlme.tid_state_tx[tid];
- /* NOTE: no need to use sta->lock in this state check, as
- * ieee80211_stop_tx_ba_session will let only one stop call to
- * pass through per sta/tid
- */
- if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ spin_lock_bh(&sta->lock);
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
- rcu_read_unlock();
- return;
+ goto unlock_sta;
}
- if (*state & HT_AGG_STATE_INITIATOR_MSK)
+ if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
ieee80211_send_delba(sta->sdata, ra, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
- spin_lock_bh(&sta->lock);
- spin_lock(&local->ampdu_lock);
+ /*
+ * When we get here, the TX path will not be lockless any more wrt.
+ * aggregation, since the OPERATIONAL bit has long been cleared.
+ * Thus it will block on getting the lock, if it occurs. So if we
+ * stop the queue now, we will not get any more packets, and any
+ * that might be being processed will wait for us here, thereby
+ * guaranteeing that no packets go to the tid_tx pending queue any
+ * more.
+ */
- ieee80211_agg_splice_packets(local, sta, tid);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
- *state = HT_AGG_STATE_IDLE;
- /* from now on packets are no longer put onto sta->pending */
- kfree(sta->ampdu_mlme.tid_tx[tid]);
- sta->ampdu_mlme.tid_tx[tid] = NULL;
+ /* future packets must not find the tid_tx struct any more */
+ rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
- ieee80211_agg_splice_finish(local, sta, tid);
+ ieee80211_agg_splice_finish(local, tid);
- spin_unlock(&local->ampdu_lock);
- spin_unlock_bh(&sta->lock);
+ call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
- rcu_read_unlock();
+ unlock_sta:
+ spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+ unlock:
+ mutex_unlock(&local->sta_mtx);
}
-EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
@@ -646,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
- ra_tid->vif = vif;
- skb->pkt_type = IEEE80211_DELBA_MSG;
- skb_queue_tail(&local->skb_queue, skb);
- tasklet_schedule(&local->tasklet);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
+ skb_queue_tail(&sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
@@ -660,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt,
size_t len)
{
+ struct tid_ampdu_tx *tid_tx;
u16 capab, tid;
- u8 *state;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
- state = &sta->ampdu_mlme.tid_state_tx[tid];
-
- spin_lock_bh(&sta->lock);
+ mutex_lock(&sta->ampdu_mlme.mtx);
- if (!(*state & HT_ADDBA_REQUESTED_MSK))
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ if (!tid_tx)
goto out;
- if (mgmt->u.action.u.addba_resp.dialog_token !=
- sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
+ if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
goto out;
}
- del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+ del_timer(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) {
- u8 curstate = *state;
-
- *state |= HT_ADDBA_RECEIVED_MSK;
+ if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
+ &tid_tx->state)) {
+ /* ignore duplicate response */
+ goto out;
+ }
- if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
+ if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -702,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
}
out:
- spin_unlock_bh(&sta->lock);
+ mutex_unlock(&sta->ampdu_mlme.mtx);
}
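
The agg-tx.c hunks above replace the per-TID u8 state machine with individual bits on tid_tx->state and defer the driver handshake to a work item; the session only becomes operational once both the driver callback and the peer's addBA response have been seen. Below is a minimal userspace sketch of that bit-flag handshake; names such as AGG_DRV_READY and agg_event() are illustrative only, not mac80211 symbols.

/*
 * Userspace analogue (hypothetical names, not mac80211 code) of the new
 * per-TID state handling: individual bits on one word replace the old
 * monolithic u8 state, and the session goes operational only after both
 * the driver ack and the addBA response have arrived.
 */
#include <stdio.h>

enum {
        AGG_WANT_START     = 1 << 0,    /* requested, worker not run yet */
        AGG_DRV_READY      = 1 << 1,    /* driver acked TX_START */
        AGG_RESPONSE_RECVD = 1 << 2,    /* peer sent the addBA response */
        AGG_OPERATIONAL    = 1 << 3,    /* frames may now be aggregated */
};

static void agg_event(unsigned long *state, unsigned long bit)
{
        *state |= bit;
        /* only when both preconditions hold does the session go live */
        if ((*state & (AGG_DRV_READY | AGG_RESPONSE_RECVD)) ==
            (AGG_DRV_READY | AGG_RESPONSE_RECVD))
                *state |= AGG_OPERATIONAL;
}

int main(void)
{
        unsigned long state = AGG_WANT_START;

        agg_event(&state, AGG_DRV_READY);       /* driver callback first... */
        agg_event(&state, AGG_RESPONSE_RECVD);  /* ...then the peer answers */
        printf("operational: %s\n",
               state & AGG_OPERATIONAL ? "yes" : "no");
        return 0;
}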
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..ed8c9f5be94f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -120,6 +120,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_key *key;
int err;
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
switch (params->cipher) {
@@ -145,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
if (!key)
return -ENOMEM;
- rcu_read_lock();
+ mutex_lock(&sdata->local->sta_mtx);
if (mac_addr) {
sta = sta_info_get_bss(sdata, mac_addr);
@@ -160,7 +163,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
err = 0;
out_unlock:
- rcu_read_unlock();
+ mutex_unlock(&sdata->local->sta_mtx);
return err;
}
@@ -174,7 +177,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- rcu_read_lock();
+ mutex_lock(&sdata->local->sta_mtx);
if (mac_addr) {
ret = -ENOENT;
@@ -202,7 +205,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
ret = 0;
out_unlock:
- rcu_read_unlock();
+ mutex_unlock(&sdata->local->sta_mtx);
return ret;
}
@@ -305,15 +308,10 @@ static int ieee80211_config_default_key(struct wiphy *wiphy,
struct net_device *dev,
u8 key_idx)
{
- struct ieee80211_sub_if_data *sdata;
-
- rcu_read_lock();
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
ieee80211_set_default_key(sdata, key_idx);
- rcu_read_unlock();
-
return 0;
}
@@ -600,7 +598,7 @@ struct iapp_layer2_update {
u8 ssap; /* 0 */
u8 control;
u8 xid_info[3];
-} __attribute__ ((packed));
+} __packed;
static void ieee80211_send_layer2_update(struct sta_info *sta)
{
@@ -1448,7 +1446,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct ieee80211_conf *conf = &local->hw.conf;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
@@ -1457,11 +1454,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (enabled == sdata->u.mgd.powersave &&
- timeout == conf->dynamic_ps_forced_timeout)
+ timeout == local->dynamic_ps_forced_timeout)
return 0;
sdata->u.mgd.powersave = enabled;
- conf->dynamic_ps_forced_timeout = timeout;
+ local->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */
mutex_lock(&sdata->u.mgd.mtx);
@@ -1554,10 +1551,58 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
const u8 *buf, size_t len, u64 *cookie)
{
- return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
- channel_type, buf, len, cookie);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ const struct ieee80211_mgmt *mgmt = (void *)buf;
+ u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ /* Check that we are on the requested channel for transmission */
+ if (chan != local->tmp_channel &&
+ chan != local->oper_channel)
+ return -EBUSY;
+ if (channel_type_valid &&
+ (channel_type != local->tmp_channel_type &&
+ channel_type != local->_oper_channel_type))
+ return -EBUSY;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_ADHOC:
+ if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
+ break;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->da);
+ rcu_read_unlock();
+ if (!sta)
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
+ flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ memcpy(skb_put(skb, len), buf, len);
+
+ IEEE80211_SKB_CB(skb)->flags = flags;
+
+ skb->dev = sdata->dev;
+ ieee80211_tx_skb(sdata, skb);
+
+ *cookie = (unsigned long) skb;
+ return 0;
}
struct cfg80211_ops mac80211_config_ops = {
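
The new ieee80211_action() path builds the frame itself: it reserves the driver's extra TX headroom first, then appends the caller's buffer, so the driver can prepend its own descriptors without reallocating. A hedged sketch of that idiom follows; build_mgmt_skb() is a hypothetical helper assembled only from calls already used in the hunk above, not an existing mac80211 function.

/*
 * Kernel-style sketch (hypothetical helper, not compilable standalone):
 * reserve the driver headroom, then append the management frame payload.
 */
static struct sk_buff *build_mgmt_skb(struct ieee80211_local *local,
                                      const u8 *buf, size_t len)
{
        struct sk_buff *skb;

        skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
        if (!skb)
                return NULL;

        skb_reserve(skb, local->hw.extra_tx_headroom); /* room for the driver */
        memcpy(skb_put(skb, len), buf, len);           /* the frame itself */

        return skb;
}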
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 637929b65ccc..a694c593ff6a 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -307,9 +307,6 @@ static const struct file_operations queues_ops = {
/* statistics stuff */
-#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
- DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value)
-
static ssize_t format_devstat_counter(struct ieee80211_local *local,
char __user *userbuf,
size_t count, loff_t *ppos,
@@ -351,75 +348,16 @@ static const struct file_operations stats_ ##name## _ops = { \
.open = mac80211_open_file_generic, \
};
-#define DEBUGFS_STATS_ADD(name) \
+#define DEBUGFS_STATS_ADD(name, field) \
+ debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
+#define DEBUGFS_DEVSTATS_ADD(name) \
debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
-DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u",
- local->dot11TransmittedFragmentCount);
-DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u",
- local->dot11MulticastTransmittedFrameCount);
-DEBUGFS_STATS_FILE(failed_count, 20, "%u",
- local->dot11FailedCount);
-DEBUGFS_STATS_FILE(retry_count, 20, "%u",
- local->dot11RetryCount);
-DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u",
- local->dot11MultipleRetryCount);
-DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u",
- local->dot11FrameDuplicateCount);
-DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u",
- local->dot11ReceivedFragmentCount);
-DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u",
- local->dot11MulticastReceivedFrameCount);
-DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u",
- local->dot11TransmittedFrameCount);
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u",
- local->tx_handlers_drop);
-DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u",
- local->tx_handlers_queued);
-DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u",
- local->tx_handlers_drop_unencrypted);
-DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u",
- local->tx_handlers_drop_fragment);
-DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u",
- local->tx_handlers_drop_wep);
-DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u",
- local->tx_handlers_drop_not_assoc);
-DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u",
- local->tx_handlers_drop_unauth_port);
-DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u",
- local->rx_handlers_drop);
-DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u",
- local->rx_handlers_queued);
-DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u",
- local->rx_handlers_drop_nullfunc);
-DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u",
- local->rx_handlers_drop_defrag);
-DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u",
- local->rx_handlers_drop_short);
-DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u",
- local->rx_handlers_drop_passive_scan);
-DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u",
- local->tx_expand_skb_head);
-DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u",
- local->tx_expand_skb_head_cloned);
-DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u",
- local->rx_expand_skb_head);
-DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u",
- local->rx_expand_skb_head2);
-DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
- local->rx_handlers_fragments);
-DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
- local->tx_status_drop);
-
-#endif
-
DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount);
-
void debugfs_hw_add(struct ieee80211_local *local)
{
struct dentry *phyd = local->hw.wiphy->debugfsdir;
@@ -448,38 +386,60 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (!statsd)
return;
- DEBUGFS_STATS_ADD(transmitted_fragment_count);
- DEBUGFS_STATS_ADD(multicast_transmitted_frame_count);
- DEBUGFS_STATS_ADD(failed_count);
- DEBUGFS_STATS_ADD(retry_count);
- DEBUGFS_STATS_ADD(multiple_retry_count);
- DEBUGFS_STATS_ADD(frame_duplicate_count);
- DEBUGFS_STATS_ADD(received_fragment_count);
- DEBUGFS_STATS_ADD(multicast_received_frame_count);
- DEBUGFS_STATS_ADD(transmitted_frame_count);
+ DEBUGFS_STATS_ADD(transmitted_fragment_count,
+ local->dot11TransmittedFragmentCount);
+ DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
+ local->dot11MulticastTransmittedFrameCount);
+ DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
+ DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
+ DEBUGFS_STATS_ADD(multiple_retry_count,
+ local->dot11MultipleRetryCount);
+ DEBUGFS_STATS_ADD(frame_duplicate_count,
+ local->dot11FrameDuplicateCount);
+ DEBUGFS_STATS_ADD(received_fragment_count,
+ local->dot11ReceivedFragmentCount);
+ DEBUGFS_STATS_ADD(multicast_received_frame_count,
+ local->dot11MulticastReceivedFrameCount);
+ DEBUGFS_STATS_ADD(transmitted_frame_count,
+ local->dot11TransmittedFrameCount);
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
- DEBUGFS_STATS_ADD(tx_handlers_drop);
- DEBUGFS_STATS_ADD(tx_handlers_queued);
- DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted);
- DEBUGFS_STATS_ADD(tx_handlers_drop_fragment);
- DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
- DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
- DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port);
- DEBUGFS_STATS_ADD(rx_handlers_drop);
- DEBUGFS_STATS_ADD(rx_handlers_queued);
- DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
- DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
- DEBUGFS_STATS_ADD(rx_handlers_drop_short);
- DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan);
- DEBUGFS_STATS_ADD(tx_expand_skb_head);
- DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
- DEBUGFS_STATS_ADD(rx_expand_skb_head);
- DEBUGFS_STATS_ADD(rx_expand_skb_head2);
- DEBUGFS_STATS_ADD(rx_handlers_fragments);
- DEBUGFS_STATS_ADD(tx_status_drop);
+ DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
+ DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
+ local->tx_handlers_drop_unencrypted);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
+ local->tx_handlers_drop_fragment);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
+ local->tx_handlers_drop_wep);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
+ local->tx_handlers_drop_not_assoc);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
+ local->tx_handlers_drop_unauth_port);
+ DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
+ DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
+ local->rx_handlers_drop_nullfunc);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
+ local->rx_handlers_drop_defrag);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_short,
+ local->rx_handlers_drop_short);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
+ local->rx_handlers_drop_passive_scan);
+ DEBUGFS_STATS_ADD(tx_expand_skb_head,
+ local->tx_expand_skb_head);
+ DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
+ local->tx_expand_skb_head_cloned);
+ DEBUGFS_STATS_ADD(rx_expand_skb_head,
+ local->rx_expand_skb_head);
+ DEBUGFS_STATS_ADD(rx_expand_skb_head2,
+ local->rx_expand_skb_head2);
+ DEBUGFS_STATS_ADD(rx_handlers_fragments,
+ local->rx_handlers_fragments);
+ DEBUGFS_STATS_ADD(tx_status_drop,
+ local->tx_status_drop);
#endif
- DEBUGFS_STATS_ADD(dot11ACKFailureCount);
- DEBUGFS_STATS_ADD(dot11RTSFailureCount);
- DEBUGFS_STATS_ADD(dot11FCSErrorCount);
- DEBUGFS_STATS_ADD(dot11RTSSuccessCount);
+ DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
+ DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
+ DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
+ DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
}
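
The debugfs rework above drops the per-counter read handlers: counters that are plain u32 values are handed straight to debugfs_create_u32(), so one call replaces each hand-written handler that only printed "%u". A rough kernel-side sketch (hypothetical export_counters() wrapper, not compilable on its own):

/*
 * Sketch only: expose two of the dot11 counters directly, mirroring what
 * the DEBUGFS_STATS_ADD() macro now expands to.
 */
static void export_counters(struct dentry *statsd,
                            struct ieee80211_local *local)
{
        debugfs_create_u32("failed_count", 0400, statsd,
                           (u32 *) &local->dot11FailedCount);
        debugfs_create_u32("retry_count", 0400, statsd,
                           (u32 *) &local->dot11RetryCount);
}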
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 97c9e46e859e..fa5e76e658ef 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
len = p - buf;
break;
case ALG_CCMP:
- for (i = 0; i < NUM_RX_DATA_QUEUES; i++) {
+ for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
rpn = key->u.ccmp.rx_pn[i];
p += scnprintf(p, sizeof(buf)+buf-p,
"%02x%02x%02x%02x%02x%02x\n",
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index e763f1529ddb..76839d4dfaac 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -30,7 +30,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \
}
#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n")
#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n")
-#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n")
#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
#define STA_OPS(name) \
@@ -52,19 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_FILE(aid, sta.aid, D);
STA_FILE(dev, sdata->name, S);
-STA_FILE(rx_packets, rx_packets, LU);
-STA_FILE(tx_packets, tx_packets, LU);
-STA_FILE(rx_bytes, rx_bytes, LU);
-STA_FILE(tx_bytes, tx_bytes, LU);
-STA_FILE(rx_duplicates, num_duplicates, LU);
-STA_FILE(rx_fragments, rx_fragments, LU);
-STA_FILE(rx_dropped, rx_dropped, LU);
-STA_FILE(tx_fragments, tx_fragments, LU);
-STA_FILE(tx_filtered, tx_filtered_count, LU);
-STA_FILE(tx_retry_failed, tx_retry_failed, LU);
-STA_FILE(tx_retry_count, tx_retry_count, LU);
STA_FILE(last_signal, last_signal, D);
-STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -134,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
- "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
+ "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_active_rx[i]);
+ !!sta->ampdu_mlme.tid_rx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_active_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_active_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- sta->ampdu_mlme.tid_state_tx[i]);
+ !!sta->ampdu_mlme.tid_tx[i]);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i] ?
sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
- p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_state_tx[i] ?
- sta->ampdu_mlme.tid_tx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
- sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i] ?
skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\n");
}
@@ -210,8 +194,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
if (start)
ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
else
- ret = ieee80211_stop_tx_ba_session(&sta->sta, tid,
- WLAN_BACK_RECIPIENT);
+ ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
} else {
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
ret = 0;
@@ -307,6 +290,13 @@ STA_OPS(ht_capa);
debugfs_create_file(#name, 0400, \
sta->debugfs.dir, sta, &sta_ ##name## _ops);
+#define DEBUGFS_ADD_COUNTER(name, field) \
+ if (sizeof(sta->field) == sizeof(u32)) \
+ debugfs_create_u32(#name, 0400, sta->debugfs.dir, \
+ (u32 *) &sta->field); \
+ else \
+ debugfs_create_u64(#name, 0400, sta->debugfs.dir, \
+ (u64 *) &sta->field);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
@@ -339,20 +329,21 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(dev);
- DEBUGFS_ADD(rx_packets);
- DEBUGFS_ADD(tx_packets);
- DEBUGFS_ADD(rx_bytes);
- DEBUGFS_ADD(tx_bytes);
- DEBUGFS_ADD(rx_duplicates);
- DEBUGFS_ADD(rx_fragments);
- DEBUGFS_ADD(rx_dropped);
- DEBUGFS_ADD(tx_fragments);
- DEBUGFS_ADD(tx_filtered);
- DEBUGFS_ADD(tx_retry_failed);
- DEBUGFS_ADD(tx_retry_count);
DEBUGFS_ADD(last_signal);
- DEBUGFS_ADD(wep_weak_iv_count);
DEBUGFS_ADD(ht_capa);
+
+ DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
+ DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
+ DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
+ DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
+ DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
+ DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
+ DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
+ DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
+ DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
+ DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
+ DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
+ DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c1da0809160..c33317320eee 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local)
might_sleep();
+ trace_drv_start(local);
local->started = true;
smp_mb();
ret = local->ops->start(&local->hw);
- trace_drv_start(local, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local)
{
might_sleep();
- local->ops->stop(&local->hw);
trace_drv_stop(local);
+ local->ops->stop(&local->hw);
+ trace_drv_return_void(local);
/* sync away all work on the tasklet before clearing started */
tasklet_disable(&local->tasklet);
@@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local,
might_sleep();
+ trace_drv_add_interface(local, vif_to_sdata(vif));
ret = local->ops->add_interface(&local->hw, vif);
- trace_drv_add_interface(local, vif_to_sdata(vif), ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
{
might_sleep();
- local->ops->remove_interface(&local->hw, vif);
trace_drv_remove_interface(local, vif_to_sdata(vif));
+ local->ops->remove_interface(&local->hw, vif);
+ trace_drv_return_void(local);
}
static inline int drv_config(struct ieee80211_local *local, u32 changed)
@@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed)
might_sleep();
+ trace_drv_config(local, changed);
ret = local->ops->config(&local->hw, changed);
- trace_drv_config(local, changed, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -78,9 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
{
might_sleep();
+ trace_drv_bss_info_changed(local, sdata, info, changed);
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
- trace_drv_bss_info_changed(local, sdata, info, changed);
+ trace_drv_return_void(local);
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -88,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
{
u64 ret = 0;
+ trace_drv_prepare_multicast(local, mc_list->count);
+
if (local->ops->prepare_multicast)
ret = local->ops->prepare_multicast(&local->hw, mc_list);
- trace_drv_prepare_multicast(local, mc_list->count, ret);
+ trace_drv_return_u64(local, ret);
return ret;
}
@@ -103,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local,
{
might_sleep();
- local->ops->configure_filter(&local->hw, changed_flags, total_flags,
- multicast);
trace_drv_configure_filter(local, changed_flags, total_flags,
multicast);
+ local->ops->configure_filter(&local->hw, changed_flags, total_flags,
+ multicast);
+ trace_drv_return_void(local);
}
static inline int drv_set_tim(struct ieee80211_local *local,
struct ieee80211_sta *sta, bool set)
{
int ret = 0;
+ trace_drv_set_tim(local, sta, set);
if (local->ops->set_tim)
ret = local->ops->set_tim(&local->hw, sta, set);
- trace_drv_set_tim(local, sta, set, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -129,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local,
might_sleep();
+ trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
- trace_drv_set_key(local, cmd, sdata, sta, key, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -145,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (sta)
ista = &sta->sta;
+ trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
ista, iv32, phase1key);
- trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
+ trace_drv_return_void(local);
}
static inline int drv_hw_scan(struct ieee80211_local *local,
@@ -159,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
might_sleep();
+ trace_drv_hw_scan(local, sdata, req);
ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
- trace_drv_hw_scan(local, sdata, req, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -168,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_sw_scan_start(local);
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(&local->hw);
- trace_drv_sw_scan_start(local);
+ trace_drv_return_void(local);
}
static inline void drv_sw_scan_complete(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_sw_scan_complete(local);
if (local->ops->sw_scan_complete)
local->ops->sw_scan_complete(&local->hw);
- trace_drv_sw_scan_complete(local);
+ trace_drv_return_void(local);
}
static inline int drv_get_stats(struct ieee80211_local *local,
@@ -211,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
might_sleep();
+ trace_drv_set_rts_threshold(local, value);
if (local->ops->set_rts_threshold)
ret = local->ops->set_rts_threshold(&local->hw, value);
- trace_drv_set_rts_threshold(local, value, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -223,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local,
int ret = 0;
might_sleep();
+ trace_drv_set_coverage_class(local, value);
if (local->ops->set_coverage_class)
local->ops->set_coverage_class(&local->hw, value);
else
ret = -EOPNOTSUPP;
- trace_drv_set_coverage_class(local, value, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -237,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
+ trace_drv_sta_notify(local, sdata, cmd, sta);
if (local->ops->sta_notify)
local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
- trace_drv_sta_notify(local, sdata, cmd, sta);
+ trace_drv_return_void(local);
}
static inline int drv_sta_add(struct ieee80211_local *local,
@@ -250,13 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local,
might_sleep();
+ trace_drv_sta_add(local, sdata, sta);
if (local->ops->sta_add)
ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
- else if (local->ops->sta_notify)
- local->ops->sta_notify(&local->hw, &sdata->vif,
- STA_NOTIFY_ADD, sta);
- trace_drv_sta_add(local, sdata, sta, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -267,13 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
{
might_sleep();
+ trace_drv_sta_remove(local, sdata, sta);
if (local->ops->sta_remove)
local->ops->sta_remove(&local->hw, &sdata->vif, sta);
- else if (local->ops->sta_notify)
- local->ops->sta_notify(&local->hw, &sdata->vif,
- STA_NOTIFY_REMOVE, sta);
- trace_drv_sta_remove(local, sdata, sta);
+ trace_drv_return_void(local);
}
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
@@ -283,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
might_sleep();
+ trace_drv_conf_tx(local, queue, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, queue, params);
- trace_drv_conf_tx(local, queue, params, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -295,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
might_sleep();
+ trace_drv_get_tsf(local);
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw);
- trace_drv_get_tsf(local, ret);
+ trace_drv_return_u64(local, ret);
return ret;
}
@@ -305,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
{
might_sleep();
+ trace_drv_set_tsf(local, tsf);
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, tsf);
- trace_drv_set_tsf(local, tsf);
+ trace_drv_return_void(local);
}
static inline void drv_reset_tsf(struct ieee80211_local *local)
{
might_sleep();
+ trace_drv_reset_tsf(local);
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw);
- trace_drv_reset_tsf(local);
+ trace_drv_return_void(local);
}
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
@@ -325,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
might_sleep();
+ trace_drv_tx_last_beacon(local);
if (local->ops->tx_last_beacon)
ret = local->ops->tx_last_beacon(&local->hw);
- trace_drv_tx_last_beacon(local, ret);
+ trace_drv_return_int(local, ret);
return ret;
}
@@ -338,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
u16 *ssn)
{
int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
sta, tid, ssn);
- trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
+
+ trace_drv_return_int(local, ret);
+
return ret;
}
@@ -370,6 +396,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop)
trace_drv_flush(local, drop);
if (local->ops->flush)
local->ops->flush(&local->hw, drop);
+ trace_drv_return_void(local);
}
static inline void drv_channel_switch(struct ieee80211_local *local,
@@ -377,9 +404,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local,
{
might_sleep();
- local->ops->channel_switch(&local->hw, ch_switch);
-
trace_drv_channel_switch(local, ch_switch);
+ local->ops->channel_switch(&local->hw, ch_switch);
+ trace_drv_return_void(local);
}
#endif /* __MAC80211_DRIVER_OPS */
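
driver-ops.h now traces on entry and emits a separate trace_drv_return_{void,int,u64} event after the callback returns, so a driver callback that sleeps or never comes back is still bracketed in the trace. A userspace analogue of that bracketing, with hypothetical names:

/*
 * Userspace sketch (not mac80211 code): log before invoking the callback
 * and log the result afterwards, so a callback that blocks or crashes
 * still leaves the "entry" record behind.
 */
#include <stdio.h>

static int driver_config(unsigned int changed)
{
        return 0;       /* stands in for local->ops->config() */
}

static int drv_config(unsigned int changed)
{
        int ret;

        printf("drv_config: changed=%#x\n", changed);   /* trace on entry */
        ret = driver_config(changed);
        printf("drv_config: ret=%d\n", ret);            /* trace_drv_return_int */
        return ret;
}

int main(void)
{
        return drv_config(0x40);
}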
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6a9b2342a9c2..8da31caff931 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {}
* Tracing for driver callbacks.
*/
-TRACE_EVENT(drv_start,
- TP_PROTO(struct ieee80211_local *local, int ret),
+TRACE_EVENT(drv_return_void,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+ TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
+);
+TRACE_EVENT(drv_return_int,
+ TP_PROTO(struct ieee80211_local *local, int ret),
TP_ARGS(local, ret),
-
TP_STRUCT__entry(
LOCAL_ENTRY
__field(int, ret)
),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+ TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
+);
+TRACE_EVENT(drv_return_u64,
+ TP_PROTO(struct ieee80211_local *local, u64 ret),
+ TP_ARGS(local, ret),
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u64, ret)
+ ),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->ret = ret;
),
+ TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(drv_start,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
TP_printk(
LOCAL_PR_FMT, LOCAL_PR_ARG
@@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop,
TRACE_EVENT(drv_add_interface,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- int ret),
+ struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, sdata, ret),
+ TP_ARGS(local, sdata),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
__array(char, addr, 6)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
memcpy(__entry->addr, sdata->vif.addr, 6);
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
)
);
@@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface,
TRACE_EVENT(drv_config,
TP_PROTO(struct ieee80211_local *local,
- u32 changed,
- int ret),
+ u32 changed),
- TP_ARGS(local, changed, ret),
+ TP_ARGS(local, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u32, changed)
- __field(int, ret)
__field(u32, flags)
__field(int, power_level)
__field(int, dynamic_ps_timeout)
@@ -150,7 +183,6 @@ TRACE_EVENT(drv_config,
TP_fast_assign(
LOCAL_ASSIGN;
__entry->changed = changed;
- __entry->ret = ret;
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
@@ -164,8 +196,8 @@ TRACE_EVENT(drv_config,
),
TP_printk(
- LOCAL_PR_FMT " ch:%#x freq:%d ret:%d",
- LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret
+ LOCAL_PR_FMT " ch:%#x freq:%d",
+ LOCAL_PR_ARG, __entry->changed, __entry->center_freq
)
);
@@ -220,26 +252,23 @@ TRACE_EVENT(drv_bss_info_changed,
);
TRACE_EVENT(drv_prepare_multicast,
- TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
+ TP_PROTO(struct ieee80211_local *local, int mc_count),
- TP_ARGS(local, mc_count, ret),
+ TP_ARGS(local, mc_count),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(int, mc_count)
- __field(u64, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->mc_count = mc_count;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " prepare mc (%d): %llx",
- LOCAL_PR_ARG, __entry->mc_count,
- (unsigned long long) __entry->ret
+ LOCAL_PR_FMT " prepare mc (%d)",
+ LOCAL_PR_ARG, __entry->mc_count
)
);
@@ -273,27 +302,25 @@ TRACE_EVENT(drv_configure_filter,
TRACE_EVENT(drv_set_tim,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_sta *sta, bool set, int ret),
+ struct ieee80211_sta *sta, bool set),
- TP_ARGS(local, sta, set, ret),
+ TP_ARGS(local, sta, set),
TP_STRUCT__entry(
LOCAL_ENTRY
STA_ENTRY
__field(bool, set)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
STA_ASSIGN;
__entry->set = set;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d",
- LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret
+ LOCAL_PR_FMT STA_PR_FMT " set:%d",
+ LOCAL_PR_ARG, STA_PR_FMT, __entry->set
)
);
@@ -301,9 +328,9 @@ TRACE_EVENT(drv_set_key,
TP_PROTO(struct ieee80211_local *local,
enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key, int ret),
+ struct ieee80211_key_conf *key),
- TP_ARGS(local, cmd, sdata, sta, key, ret),
+ TP_ARGS(local, cmd, sdata, sta, key),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -313,7 +340,6 @@ TRACE_EVENT(drv_set_key,
__field(u8, hw_key_idx)
__field(u8, flags)
__field(s8, keyidx)
- __field(int, ret)
),
TP_fast_assign(
@@ -324,12 +350,11 @@ TRACE_EVENT(drv_set_key,
__entry->flags = key->flags;
__entry->keyidx = key->keyidx;
__entry->hw_key_idx = key->hw_key_idx;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
)
);
@@ -364,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key,
TRACE_EVENT(drv_hw_scan,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct cfg80211_scan_request *req, int ret),
+ struct cfg80211_scan_request *req),
- TP_ARGS(local, sdata, req, ret),
+ TP_ARGS(local, sdata, req),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
- LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG,VIF_PR_ARG
)
);
@@ -479,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq,
);
TRACE_EVENT(drv_set_rts_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value, int ret),
+ TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value, ret),
+ TP_ARGS(local, value),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u32, value)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d ret:%d",
- LOCAL_PR_ARG, __entry->value, __entry->ret
+ LOCAL_PR_FMT " value:%d",
+ LOCAL_PR_ARG, __entry->value
)
);
TRACE_EVENT(drv_set_coverage_class,
- TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+ TP_PROTO(struct ieee80211_local *local, u8 value),
- TP_ARGS(local, value, ret),
+ TP_ARGS(local, value),
TP_STRUCT__entry(
LOCAL_ENTRY
__field(u8, value)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d ret:%d",
- LOCAL_PR_ARG, __entry->value, __entry->ret
+ LOCAL_PR_FMT " value:%d",
+ LOCAL_PR_ARG, __entry->value
)
);
@@ -555,27 +574,25 @@ TRACE_EVENT(drv_sta_notify,
TRACE_EVENT(drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta, int ret),
+ struct ieee80211_sta *sta),
- TP_ARGS(local, sdata, sta, ret),
+ TP_ARGS(local, sdata, sta),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
)
);
@@ -606,10 +623,9 @@ TRACE_EVENT(drv_sta_remove,
TRACE_EVENT(drv_conf_tx,
TP_PROTO(struct ieee80211_local *local, u16 queue,
- const struct ieee80211_tx_queue_params *params,
- int ret),
+ const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, queue, params, ret),
+ TP_ARGS(local, queue, params),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -618,13 +634,11 @@ TRACE_EVENT(drv_conf_tx,
__field(u16, cw_min)
__field(u16, cw_max)
__field(u8, aifs)
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
__entry->queue = queue;
- __entry->ret = ret;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
@@ -632,29 +646,27 @@ TRACE_EVENT(drv_conf_tx,
),
TP_printk(
- LOCAL_PR_FMT " queue:%d ret:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->ret
+ LOCAL_PR_FMT " queue:%d",
+ LOCAL_PR_ARG, __entry->queue
)
);
TRACE_EVENT(drv_get_tsf,
- TP_PROTO(struct ieee80211_local *local, u64 ret),
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, ret),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u64, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%llu",
- LOCAL_PR_ARG, (unsigned long long)__entry->ret
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
)
);
@@ -698,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf,
);
TRACE_EVENT(drv_tx_last_beacon,
- TP_PROTO(struct ieee80211_local *local, int ret),
+ TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local, ret),
+ TP_ARGS(local),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT,
+ LOCAL_PR_ARG
)
);
@@ -723,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, int ret),
+ u16 *ssn),
- TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
+ TP_ARGS(local, sdata, action, sta, tid, ssn),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -733,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action,
__field(u32, action)
__field(u16, tid)
__field(u16, ssn)
- __field(int, ret)
VIF_ENTRY
),
@@ -741,15 +750,14 @@ TRACE_EVENT(drv_ampdu_action,
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->ret = ret;
__entry->action = action;
__entry->tid = tid;
__entry->ssn = ssn ? *ssn : 0;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
)
);
@@ -851,25 +859,23 @@ TRACE_EVENT(api_start_tx_ba_cb,
);
TRACE_EVENT(api_stop_tx_ba_session,
- TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+ TP_PROTO(struct ieee80211_sta *sta, u16 tid),
- TP_ARGS(sta, tid, initiator),
+ TP_ARGS(sta, tid),
TP_STRUCT__entry(
STA_ENTRY
__field(u16, tid)
- __field(u16, initiator)
),
TP_fast_assign(
STA_ASSIGN;
__entry->tid = tid;
- __entry->initiator = initiator;
),
TP_printk(
- STA_PR_FMT " tid:%d initiator:%d",
- STA_PR_ARG, __entry->tid, __entry->initiator
+ STA_PR_FMT " tid:%d",
+ STA_PR_ARG, __entry->tid
)
);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2ab106a0a491..be928ef7ef51 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -6,7 +6,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
{
int i;
+ cancel_work_sync(&sta->ampdu_mlme.work);
+
for (i = 0; i < STA_TID_NUM; i++) {
__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
}
}
+void ieee80211_ba_session_work(struct work_struct *work)
+{
+ struct sta_info *sta =
+ container_of(work, struct sta_info, ampdu_mlme.work);
+ struct tid_ampdu_tx *tid_tx;
+ int tid;
+
+ /*
+ * When this flag is set, new sessions should be
+ * blocked, and existing sessions will be torn
+ * down by the code that set the flag, so this
+ * need not run.
+ */
+ if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+ return;
+
+ mutex_lock(&sta->ampdu_mlme.mtx);
+ for (tid = 0; tid < STA_TID_NUM; tid++) {
+ if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
+ ___ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_TIMEOUT);
+
+ tid_tx = sta->ampdu_mlme.tid_tx[tid];
+ if (!tid_tx)
+ continue;
+
+ if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
+ ieee80211_tx_ba_session_handle_start(sta, tid);
+ else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+ &tid_tx->state))
+ ___ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_INITIATOR);
+ }
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+}
+
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code)
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
if (initiator == WLAN_BACK_INITIATOR)
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
- else { /* WLAN_BACK_RECIPIENT */
- spin_lock_bh(&sta->lock);
- if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
- ___ieee80211_stop_tx_ba_session(sta, tid,
- WLAN_BACK_RECIPIENT);
- spin_unlock_bh(&sta->lock);
- }
+ else
+ __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
}
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
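
The new ieee80211_ba_session_work() walks every TID under ampdu_mlme.mtx and consumes start/stop requests with test_and_clear_bit(), so each request is serviced exactly once even if it is raised again in the meantime. A compilable userspace sketch of that test-and-clear pattern, with hypothetical names:

/*
 * Userspace analogue (not mac80211 code): requests are recorded as bits
 * and the worker clears each bit atomically while checking it.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_TIDS 16

static _Atomic unsigned int want_stop;

static int test_and_clear(int tid)
{
        unsigned int bit = 1u << tid;

        /* atomically clear the bit and report whether it had been set */
        return (atomic_fetch_and(&want_stop, ~bit) & bit) != 0;
}

int main(void)
{
        atomic_fetch_or(&want_stop, 1u << 3);   /* request: stop TID 3 */

        for (int tid = 0; tid < NUM_TIDS; tid++)
                if (test_and_clear(tid))
                        printf("stopping aggregation on TID %d\n", tid);
        return 0;
}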
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b2cc1fda6cfd..d4e84b22a66d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -172,11 +172,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(ifibss->presp, skb);
sdata->vif.bss_conf.beacon_int = beacon_int;
+ sdata->vif.bss_conf.basic_rates = basic_rates;
bss_change = BSS_CHANGED_BEACON_INT;
bss_change |= ieee80211_reset_erp_info(sdata);
bss_change |= BSS_CHANGED_BSSID;
bss_change |= BSS_CHANGED_BEACON;
bss_change |= BSS_CHANGED_BEACON_ENABLED;
+ bss_change |= BSS_CHANGED_BASIC_RATES;
bss_change |= BSS_CHANGED_IBSS;
sdata->vif.bss_conf.ibss_joined = true;
ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -529,7 +531,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
- ifibss->channel, 3, /* first two are basic */
+ ifibss->channel, ifibss->basic_rates,
capability, 0);
}
@@ -727,8 +729,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
}
-static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
@@ -754,33 +756,11 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len);
break;
}
-
- kfree_skb(skb);
}
-static void ieee80211_ibss_work(struct work_struct *work)
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.ibss.work);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_ibss *ifibss;
- struct sk_buff *skb;
-
- if (WARN_ON(local->suspended))
- return;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC))
- return;
- ifibss = &sdata->u.ibss;
-
- while ((skb = skb_dequeue(&ifibss->skb_queue)))
- ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request))
return;
@@ -798,6 +778,15 @@ static void ieee80211_ibss_work(struct work_struct *work)
}
}
+static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+
+ set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+}
+
static void ieee80211_ibss_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
@@ -810,8 +799,7 @@ static void ieee80211_ibss_timer(unsigned long data)
return;
}
- set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
- ieee80211_queue_work(&local->hw, &ifibss->work);
+ ieee80211_queue_ibss_work(sdata);
}
#ifdef CONFIG_PM
@@ -819,7 +807,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- cancel_work_sync(&ifibss->work);
if (del_timer_sync(&ifibss->timer))
ifibss->timer_running = true;
}
@@ -839,10 +826,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- INIT_WORK(&ifibss->work, ieee80211_ibss_work);
setup_timer(&ifibss->timer, ieee80211_ibss_timer,
(unsigned long) sdata);
- skb_queue_head_init(&ifibss->skb_queue);
}
/* scan finished notification */
@@ -859,37 +844,11 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
if (!sdata->u.ibss.ssid_len)
continue;
sdata->u.ibss.last_scan_completed = jiffies;
- mod_timer(&sdata->u.ibss.timer, 0);
+ ieee80211_queue_ibss_work(sdata);
}
mutex_unlock(&local->iflist_mtx);
}
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_PROBE_REQ:
- case IEEE80211_STYPE_AUTH:
- skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
- return RX_QUEUED;
- }
-
- return RX_DROP_MONITOR;
-}
-
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
@@ -902,6 +861,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.fixed_bssid = false;
sdata->u.ibss.privacy = params->privacy;
+ sdata->u.ibss.basic_rates = params->basic_rates;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
@@ -949,7 +909,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(sdata->local);
set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
return 0;
}
@@ -957,10 +917,35 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
{
struct sk_buff *skb;
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct cfg80211_bss *cbss;
+ u16 capability;
+ int active_ibss = 0;
+
+ active_ibss = ieee80211_sta_active_ibss(sdata);
+
+ if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+ capability = WLAN_CAPABILITY_IBSS;
+
+ if (ifibss->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel,
+ ifibss->bssid, ifibss->ssid,
+ ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+ WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (cbss) {
+ cfg80211_unlink_bss(local->hw.wiphy, cbss);
+ cfg80211_put_bss(cbss);
+ }
+ }
del_timer_sync(&sdata->u.ibss.timer);
clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
- cancel_work_sync(&sdata->u.ibss.work);
+ cancel_work_sync(&sdata->work);
clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
sta_info_flush(sdata->local, sdata);
@@ -975,7 +960,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
synchronize_rcu();
kfree_skb(skb);
- skb_queue_purge(&sdata->u.ibss.skb_queue);
+ skb_queue_purge(&sdata->skb_queue);
memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
sdata->u.ibss.ssid_len = 0;
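The basic_rates value plumbed through the hunks above is assumed to be the bitmap that cfg80211 passes in struct cfg80211_ibss_params: bit i marks sband->bitrates[i] as a basic rate. A minimal sketch of building such a bitmap from rates given in units of 100 kbps (the helper name is hypothetical and not part of this patch):

static u32 example_basic_rates_bitmap(struct ieee80211_supported_band *sband,
				      const int *rates_100kbps, int n_rates)
{
	u32 bitmap = 0;
	int i, j;

	for (i = 0; i < n_rates; i++)
		for (j = 0; j < sband->n_bitrates; j++)
			if (sband->bitrates[j].bitrate == rates_100kbps[i])
				bitmap |= BIT(j);

	return bitmap;
}

On a 2.4 GHz band whose first two entries are 1 and 2 Mbit/s, rates {10, 20} yield a bitmap of 0x3, which matches the hard-coded "3, /* first two are basic */" that the hunk above replaces.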
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 1a9e2da37a93..6f905f153ed7 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -325,7 +325,6 @@ struct ieee80211_if_managed {
struct timer_list conn_mon_timer;
struct timer_list bcn_mon_timer;
struct timer_list chswitch_timer;
- struct work_struct work;
struct work_struct monitor_work;
struct work_struct chswitch_work;
struct work_struct beacon_connection_loss_work;
@@ -340,8 +339,6 @@ struct ieee80211_if_managed {
u16 aid;
- struct sk_buff_head skb_queue;
-
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
enum ieee80211_smps_mode req_smps, /* requested smps mode */
@@ -386,13 +383,12 @@ enum ieee80211_ibss_request {
struct ieee80211_if_ibss {
struct timer_list timer;
- struct work_struct work;
-
- struct sk_buff_head skb_queue;
unsigned long request;
unsigned long last_scan_completed;
+ u32 basic_rates;
+
bool timer_running;
bool fixed_bssid;
@@ -416,11 +412,9 @@ struct ieee80211_if_ibss {
};
struct ieee80211_if_mesh {
- struct work_struct work;
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
struct timer_list mesh_path_root_timer;
- struct sk_buff_head skb_queue;
unsigned long timers_running;
@@ -517,6 +511,11 @@ struct ieee80211_sub_if_data {
u16 sequence_number;
+ struct work_struct work;
+ struct sk_buff_head skb_queue;
+
+ bool arp_filter_state;
+
/*
* AP this belongs to: self in AP mode and
* corresponding AP in VLAN mode, NULL for
@@ -569,11 +568,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
#endif
}
+enum sdata_queue_type {
+ IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
+ IEEE80211_SDATA_QUEUE_AGG_START = 1,
+ IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
+};
+
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
- IEEE80211_DELBA_MSG = 3,
- IEEE80211_ADDBA_MSG = 4,
};
enum queue_stop_reason {
@@ -724,13 +727,7 @@ struct ieee80211_local {
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
- /*
- * This lock is used to prevent concurrent A-MPDU
- * session start/stop processing, this thus also
- * synchronises the ->ampdu_action() callback to
- * drivers and limits it to one at a time.
- */
- spinlock_t ampdu_lock;
+ atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
/* number of interfaces with corresponding IFF_ flags */
atomic_t iff_allmultis, iff_promiscs;
@@ -746,10 +743,10 @@ struct ieee80211_local {
struct mutex iflist_mtx;
/*
- * Key lock, protects sdata's key_list and sta_info's
+ * Key mutex, protects sdata's key_list and sta_info's
* key pointers (write access, they're RCU.)
*/
- spinlock_t key_lock;
+ struct mutex key_mtx;
/* Scanning and BSS list */
@@ -851,6 +848,13 @@ struct ieee80211_local {
struct work_struct dynamic_ps_disable_work;
struct timer_list dynamic_ps_timer;
struct notifier_block network_latency_notifier;
+ struct notifier_block ifa_notifier;
+
+ /*
+ * The dynamic ps timeout configured from user space via WEXT -
+ * this will override whatever chosen by mac80211 internally.
+ */
+ int dynamic_ps_forced_timeout;
int user_power_level; /* in dBm */
int power_constr_level; /* in dBm */
@@ -874,9 +878,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
return netdev_priv(dev);
}
-/* this struct represents 802.11n's RA/TID combination along with our vif */
+/* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid {
- struct ieee80211_vif *vif;
u8 ra[ETH_ALEN];
u16 tid;
};
@@ -985,29 +988,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req,
void *cookie);
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- const u8 *buf, size_t len, u64 *cookie);
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
int ieee80211_max_network_latency(struct notifier_block *nb,
unsigned long data, void *dummy);
+int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
struct ieee80211_bss *bss,
u64 timestamp);
void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
u8 *bssid, u8 *addr, u32 supp_rates,
gfp_t gfp);
@@ -1016,6 +1015,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+
+/* mesh code */
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
/* scan/BSS handling */
void ieee80211_scan_work(struct work_struct *work);
@@ -1084,7 +1091,7 @@ struct ieee80211_tx_status_rtap_hdr {
u8 padding_for_rate;
__le16 tx_flags;
u8 data_retries;
-} __attribute__ ((packed));
+} __packed;
/* HT */
@@ -1099,6 +1106,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid);
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason);
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
u16 initiator, u16 reason);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1118,6 +1127,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
+void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
+void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_ba_session_work(struct work_struct *work);
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
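The sdata_queue_type values declared above are consumed by ieee80211_iface_work() in the iface.c hunk below; the enqueue side lives in agg-tx.c, outside this section, but is assumed to look roughly like this sketch (mac80211-internal context, illustrative only): a zero-length skb is tagged through skb->pkt_type and carries the RA/TID pair in its control buffer.

static void example_queue_agg_start(struct ieee80211_sub_if_data *sdata,
				    const u8 *ra, u16 tid)
{
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb;

	/* dummy skb: only pkt_type and cb are used by the dequeue side */
	skb = dev_alloc_skb(0);
	if (!skb)
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}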
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 50deb017fd6e..910729fc18cd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -268,7 +268,6 @@ static int ieee80211_open(struct net_device *dev)
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
- ieee80211_enable_keys(sdata);
if (sdata->vif.type == NL80211_IFTYPE_STATION)
netif_carrier_off(dev);
@@ -321,15 +320,6 @@ static int ieee80211_open(struct net_device *dev)
ieee80211_recalc_ps(local, -1);
- /*
- * ieee80211_sta_work is disabled while network interface
- * is down. Therefore, some configuration changes may not
- * yet be effective. Trigger execution of ieee80211_sta_work
- * to fix this.
- */
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
-
netif_tx_start_all_queues(dev);
return 0;
@@ -349,7 +339,6 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
unsigned long flags;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
@@ -366,18 +355,6 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_work_purge(sdata);
/*
- * Now delete all active aggregation sessions.
- */
- rcu_read_lock();
-
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (sta->sdata == sdata)
- ieee80211_sta_tear_down_BA_sessions(sta);
- }
-
- rcu_read_unlock();
-
- /*
* Remove all stations associated with this interface.
*
* This must be done before calling ops->remove_interface()
@@ -483,27 +460,14 @@ static int ieee80211_stop(struct net_device *dev)
* whether the interface is running, which, at this point,
* it no longer is.
*/
- cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
cancel_work_sync(&sdata->u.mgd.monitor_work);
cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
- /*
- * When we get here, the interface is marked down.
- * Call synchronize_rcu() to wait for the RX path
- * should it be using the interface and enqueuing
- * frames at this very time on another CPU.
- */
- synchronize_rcu();
- skb_queue_purge(&sdata->u.mgd.skb_queue);
/* fall through */
case NL80211_IFTYPE_ADHOC:
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
del_timer_sync(&sdata->u.ibss.timer);
- cancel_work_sync(&sdata->u.ibss.work);
- synchronize_rcu();
- skb_queue_purge(&sdata->u.ibss.skb_queue);
- }
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -518,6 +482,16 @@ static int ieee80211_stop(struct net_device *dev)
}
/* fall through */
default:
+ flush_work(&sdata->work);
+ /*
+ * When we get here, the interface is marked down.
+ * Call synchronize_rcu() to wait for the RX path
+ * should it be using the interface and enqueuing
+ * frames at this very time on another CPU.
+ */
+ synchronize_rcu();
+ skb_queue_purge(&sdata->skb_queue);
+
if (local->scan_sdata == sdata)
ieee80211_scan_cancel(local);
@@ -531,8 +505,8 @@ static int ieee80211_stop(struct net_device *dev)
BSS_CHANGED_BEACON_ENABLED);
}
- /* disable all keys for as long as this netdev is down */
- ieee80211_disable_keys(sdata);
+ /* free all remaining keys, there shouldn't be any */
+ ieee80211_free_keys(sdata);
drv_remove_interface(local, &sdata->vif);
}
@@ -727,6 +701,136 @@ static void ieee80211_if_setup(struct net_device *dev)
dev->destructor = free_netdev;
}
+static void ieee80211_iface_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data, work);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ struct ieee80211_ra_tid *ra_tid;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+
+ if (local->scanning)
+ return;
+
+ /*
+ * ieee80211_queue_work() should have picked up most cases,
+ * here we'll pick the rest.
+ */
+ if (WARN(local->suspended,
+ "interface work scheduled while going to suspend\n"))
+ return;
+
+ /* first process frames */
+ while ((skb = skb_dequeue(&sdata->skb_queue))) {
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
+ ra_tid = (void *)&skb->cb;
+ ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
+ ra_tid->tid);
+ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
+ ra_tid = (void *)&skb->cb;
+ ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
+ ra_tid->tid);
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+ int len = skb->len;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (sta) {
+ switch (mgmt->u.action.u.addba_req.action_code) {
+ case WLAN_ACTION_ADDBA_REQ:
+ ieee80211_process_addba_request(
+ local, sta, mgmt, len);
+ break;
+ case WLAN_ACTION_ADDBA_RESP:
+ ieee80211_process_addba_resp(local, sta,
+ mgmt, len);
+ break;
+ case WLAN_ACTION_DELBA:
+ ieee80211_process_delba(sdata, sta,
+ mgmt, len);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *)mgmt;
+ /*
+ * So the frame isn't mgmt, but frame_control
+ * is at the right place anyway, of course, so
+ * the if statement is correct.
+ *
+ * Warn if we have other data frame types here,
+ * they must not get here.
+ */
+ WARN_ON(hdr->frame_control &
+ cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
+ WARN_ON(!(hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)));
+ /*
+ * This was a fragment of a frame, received while
+ * a block-ack session was active. That cannot be
+ * right, so terminate the session.
+ */
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (sta) {
+ u16 tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TID_MASK;
+
+ __ieee80211_stop_rx_ba_session(
+ sta, tid, WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
+ }
+ mutex_unlock(&local->sta_mtx);
+ } else switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ ieee80211_mesh_rx_queued_mgmt(sdata, skb);
+ break;
+ default:
+ WARN(1, "frame for unexpected interface type");
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+
+ /* then other type-dependent work */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ieee80211_sta_work(sdata);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ieee80211_ibss_work(sdata);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ ieee80211_mesh_work(sdata);
+ break;
+ default:
+ break;
+ }
+}
+
+
/*
* Helper function to initialise an interface to a specific type.
*/
@@ -744,6 +848,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
/* only monitor differs */
sdata->dev->type = ARPHRD_ETHER;
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
switch (type) {
case NL80211_IFTYPE_AP:
skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
@@ -969,6 +1076,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->wdev.wiphy = local->hw.wiphy;
sdata->local = local;
sdata->dev = ndev;
+#ifdef CONFIG_INET
+ sdata->arp_filter_state = true;
+#endif
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
skb_queue_head_init(&sdata->fragments[i].skb_list);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b252d8..50d1cff23d8e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -36,80 +36,20 @@
* There is currently no way of knowing this except by looking into
* debugfs.
*
- * All key operations are protected internally so you can call them at
- * any time.
+ * All key operations are protected internally.
*
* Within mac80211, key references are, just as STA structure references,
* protected by RCU. Note, however, that some things are unprotected,
* namely the key->sta dereferences within the hardware acceleration
- * functions. This means that sta_info_destroy() must flush the key todo
- * list.
- *
- * All the direct key list manipulation functions must not sleep because
- * they can operate on STA info structs that are protected by RCU.
+ * functions. This means that sta_info_destroy() must remove the key
+ * which waits for an RCU grace period.
*/
static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-/* key mutex: used to synchronise todo runners */
-static DEFINE_MUTEX(key_mutex);
-static DEFINE_SPINLOCK(todo_lock);
-static LIST_HEAD(todo_list);
-
-static void key_todo(struct work_struct *work)
+static void assert_key_lock(struct ieee80211_local *local)
{
- ieee80211_key_todo();
-}
-
-static DECLARE_WORK(todo_work, key_todo);
-
-/**
- * add_todo - add todo item for a key
- *
- * @key: key to add to do item for
- * @flag: todo flag(s)
- *
- * Must be called with IRQs or softirqs disabled.
- */
-static void add_todo(struct ieee80211_key *key, u32 flag)
-{
- if (!key)
- return;
-
- spin_lock(&todo_lock);
- key->flags |= flag;
- /*
- * Remove again if already on the list so that we move it to the end.
- */
- if (!list_empty(&key->todo))
- list_del(&key->todo);
- list_add_tail(&key->todo, &todo_list);
- schedule_work(&todo_work);
- spin_unlock(&todo_lock);
-}
-
-/**
- * ieee80211_key_lock - lock the mac80211 key operation lock
- *
- * This locks the (global) mac80211 key operation lock, all
- * key operations must be done under this lock.
- */
-static void ieee80211_key_lock(void)
-{
- mutex_lock(&key_mutex);
-}
-
-/**
- * ieee80211_key_unlock - unlock the mac80211 key operation lock
- */
-static void ieee80211_key_unlock(void)
-{
- mutex_unlock(&key_mutex);
-}
-
-static void assert_key_lock(void)
-{
- WARN_ON(!mutex_is_locked(&key_mutex));
+ WARN_ON(!mutex_is_locked(&local->key_mtx));
}
static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
@@ -126,12 +66,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sta *sta;
int ret;
- assert_key_lock();
might_sleep();
if (!key->local->ops->set_key)
return;
+ assert_key_lock(key->local);
+
sta = get_sta_for_key(key);
sdata = key->sdata;
@@ -142,11 +83,8 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
- if (!ret) {
- spin_lock_bh(&todo_lock);
+ if (!ret)
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- spin_unlock_bh(&todo_lock);
- }
if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
printk(KERN_ERR "mac80211-%s: failed to set key "
@@ -161,18 +99,15 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sta *sta;
int ret;
- assert_key_lock();
might_sleep();
if (!key || !key->local->ops->set_key)
return;
- spin_lock_bh(&todo_lock);
- if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
- spin_unlock_bh(&todo_lock);
+ assert_key_lock(key->local);
+
+ if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
return;
- }
- spin_unlock_bh(&todo_lock);
sta = get_sta_for_key(key);
sdata = key->sdata;
@@ -191,9 +126,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
wiphy_name(key->local->hw.wiphy),
key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
- spin_lock_bh(&todo_lock);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- spin_unlock_bh(&todo_lock);
}
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -201,22 +134,24 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_key *key = NULL;
+ assert_key_lock(sdata->local);
+
if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
key = sdata->keys[idx];
rcu_assign_pointer(sdata->default_key, key);
- if (key)
- add_todo(key, KEY_FLAG_TODO_DEFKEY);
+ if (key) {
+ ieee80211_debugfs_key_remove_default(key->sdata);
+ ieee80211_debugfs_key_add_default(key->sdata);
+ }
}
void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sdata->local->key_lock, flags);
+ mutex_lock(&sdata->local->key_mtx);
__ieee80211_set_default_key(sdata, idx);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+ mutex_unlock(&sdata->local->key_mtx);
}
static void
@@ -224,24 +159,26 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
{
struct ieee80211_key *key = NULL;
+ assert_key_lock(sdata->local);
+
if (idx >= NUM_DEFAULT_KEYS &&
idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
key = sdata->keys[idx];
rcu_assign_pointer(sdata->default_mgmt_key, key);
- if (key)
- add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY);
+ if (key) {
+ ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
+ ieee80211_debugfs_key_add_mgmt_default(key->sdata);
+ }
}
void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sdata->local->key_lock, flags);
+ mutex_lock(&sdata->local->key_mtx);
__ieee80211_set_default_mgmt_key(sdata, idx);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+ mutex_unlock(&sdata->local->key_mtx);
}
@@ -336,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
key->conf.iv_len = CCMP_HDR_LEN;
key->conf.icv_len = CCMP_MIC_LEN;
if (seq) {
- for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
+ for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++)
for (j = 0; j < CCMP_PN_LEN; j++)
key->u.ccmp.rx_pn[i][j] =
seq[CCMP_PN_LEN - j - 1];
@@ -352,7 +289,6 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
}
memcpy(key->conf.key, key_data, key_len);
INIT_LIST_HEAD(&key->list);
- INIT_LIST_HEAD(&key->todo);
if (alg == ALG_CCMP) {
/*
@@ -382,12 +318,27 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
return key;
}
+static void __ieee80211_key_destroy(struct ieee80211_key *key)
+{
+ if (!key)
+ return;
+
+ ieee80211_key_disable_hw_accel(key);
+
+ if (key->conf.alg == ALG_CCMP)
+ ieee80211_aes_key_free(key->u.ccmp.tfm);
+ if (key->conf.alg == ALG_AES_CMAC)
+ ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
+ ieee80211_debugfs_key_remove(key);
+
+ kfree(key);
+}
+
void ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
struct ieee80211_key *old_key;
- unsigned long flags;
int idx;
BUG_ON(!sdata);
@@ -431,7 +382,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
}
}
- spin_lock_irqsave(&sdata->local->key_lock, flags);
+ mutex_lock(&sdata->local->key_mtx);
if (sta)
old_key = sta->key;
@@ -439,15 +390,13 @@ void ieee80211_key_link(struct ieee80211_key *key,
old_key = sdata->keys[idx];
__ieee80211_key_replace(sdata, sta, old_key, key);
+ __ieee80211_key_destroy(old_key);
- /* free old key later */
- add_todo(old_key, KEY_FLAG_TODO_DELETE);
+ ieee80211_debugfs_key_add(key);
- add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
- if (ieee80211_sdata_running(sdata))
- add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
+ ieee80211_key_enable_hw_accel(key);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
+ mutex_unlock(&sdata->local->key_mtx);
}
static void __ieee80211_key_free(struct ieee80211_key *key)
@@ -458,170 +407,65 @@ static void __ieee80211_key_free(struct ieee80211_key *key)
if (key->sdata)
__ieee80211_key_replace(key->sdata, key->sta,
key, NULL);
-
- add_todo(key, KEY_FLAG_TODO_DELETE);
+ __ieee80211_key_destroy(key);
}
void ieee80211_key_free(struct ieee80211_key *key)
{
- unsigned long flags;
+ struct ieee80211_local *local;
if (!key)
return;
- if (!key->sdata) {
- /* The key has not been linked yet, simply free it
- * and don't Oops */
- if (key->conf.alg == ALG_CCMP)
- ieee80211_aes_key_free(key->u.ccmp.tfm);
- kfree(key);
- return;
- }
+ local = key->sdata->local;
- spin_lock_irqsave(&key->sdata->local->key_lock, flags);
+ mutex_lock(&local->key_mtx);
__ieee80211_key_free(key);
- spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);
+ mutex_unlock(&local->key_mtx);
}
-/*
- * To be safe against concurrent manipulations of the list (which shouldn't
- * actually happen) we need to hold the spinlock. But under the spinlock we
- * can't actually do much, so we defer processing to the todo list. Then run
- * the todo list to be sure the operation and possibly previously pending
- * operations are completed.
- */
-static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata,
- u32 todo_flags)
+void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
- unsigned long flags;
-
- might_sleep();
-
- spin_lock_irqsave(&sdata->local->key_lock, flags);
- list_for_each_entry(key, &sdata->key_list, list)
- add_todo(key, todo_flags);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
-
- ieee80211_key_todo();
-}
-void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
-{
ASSERT_RTNL();
if (WARN_ON(!ieee80211_sdata_running(sdata)))
return;
- ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
-}
-
-void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
-{
- ASSERT_RTNL();
-
- ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE);
-}
-
-static void __ieee80211_key_destroy(struct ieee80211_key *key)
-{
- if (!key)
- return;
-
- ieee80211_key_disable_hw_accel(key);
+ mutex_lock(&sdata->local->key_mtx);
- if (key->conf.alg == ALG_CCMP)
- ieee80211_aes_key_free(key->u.ccmp.tfm);
- if (key->conf.alg == ALG_AES_CMAC)
- ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
- ieee80211_debugfs_key_remove(key);
+ list_for_each_entry(key, &sdata->key_list, list)
+ ieee80211_key_enable_hw_accel(key);
- kfree(key);
+ mutex_unlock(&sdata->local->key_mtx);
}
-static void __ieee80211_key_todo(void)
+void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key;
- bool work_done;
- u32 todoflags;
- /*
- * NB: sta_info_destroy relies on this!
- */
- synchronize_rcu();
-
- spin_lock_bh(&todo_lock);
- while (!list_empty(&todo_list)) {
- key = list_first_entry(&todo_list, struct ieee80211_key, todo);
- list_del_init(&key->todo);
- todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS |
- KEY_FLAG_TODO_DEFKEY |
- KEY_FLAG_TODO_DEFMGMTKEY |
- KEY_FLAG_TODO_HWACCEL_ADD |
- KEY_FLAG_TODO_HWACCEL_REMOVE |
- KEY_FLAG_TODO_DELETE);
- key->flags &= ~todoflags;
- spin_unlock_bh(&todo_lock);
-
- work_done = false;
-
- if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) {
- ieee80211_debugfs_key_add(key);
- work_done = true;
- }
- if (todoflags & KEY_FLAG_TODO_DEFKEY) {
- ieee80211_debugfs_key_remove_default(key->sdata);
- ieee80211_debugfs_key_add_default(key->sdata);
- work_done = true;
- }
- if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) {
- ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
- ieee80211_debugfs_key_add_mgmt_default(key->sdata);
- work_done = true;
- }
- if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) {
- ieee80211_key_enable_hw_accel(key);
- work_done = true;
- }
- if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) {
- ieee80211_key_disable_hw_accel(key);
- work_done = true;
- }
- if (todoflags & KEY_FLAG_TODO_DELETE) {
- __ieee80211_key_destroy(key);
- work_done = true;
- }
+ ASSERT_RTNL();
- WARN_ON(!work_done);
+ mutex_lock(&sdata->local->key_mtx);
- spin_lock_bh(&todo_lock);
- }
- spin_unlock_bh(&todo_lock);
-}
+ list_for_each_entry(key, &sdata->key_list, list)
+ ieee80211_key_disable_hw_accel(key);
-void ieee80211_key_todo(void)
-{
- ieee80211_key_lock();
- __ieee80211_key_todo();
- ieee80211_key_unlock();
+ mutex_unlock(&sdata->local->key_mtx);
}
void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_key *key, *tmp;
- unsigned long flags;
- ieee80211_key_lock();
+ mutex_lock(&sdata->local->key_mtx);
ieee80211_debugfs_key_remove_default(sdata);
ieee80211_debugfs_key_remove_mgmt_default(sdata);
- spin_lock_irqsave(&sdata->local->key_lock, flags);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
__ieee80211_key_free(key);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
-
- __ieee80211_key_todo();
- ieee80211_key_unlock();
+ mutex_unlock(&sdata->local->key_mtx);
}
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index bdc2968c2bbe..a3849fa3fce8 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -38,25 +38,9 @@ struct sta_info;
*
* @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
* in the hardware for TX crypto hardware acceleration.
- * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an
- * RCU grace period, no longer be reachable other than from the
- * todo list.
- * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration.
- * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware
- * acceleration.
- * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated.
- * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs.
- * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs
- * to be updated.
*/
enum ieee80211_internal_key_flags {
KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
- KEY_FLAG_TODO_DELETE = BIT(1),
- KEY_FLAG_TODO_HWACCEL_ADD = BIT(2),
- KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3),
- KEY_FLAG_TODO_DEFKEY = BIT(4),
- KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
- KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
};
enum ieee80211_internal_tkip_state {
@@ -79,10 +63,8 @@ struct ieee80211_key {
/* for sdata list */
struct list_head list;
- /* for todo list */
- struct list_head todo;
- /* protected by todo lock! */
+ /* protected by key mutex */
unsigned int flags;
union {
@@ -95,7 +77,13 @@ struct ieee80211_key {
} tkip;
struct {
u8 tx_pn[6];
- u8 rx_pn[NUM_RX_DATA_QUEUES][6];
+ /*
+ * Last received packet number. The first
+ * NUM_RX_DATA_QUEUES counters are used with Data
+ * frames and the last counter is used with Robust
+ * Management frames.
+ */
+ u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6];
struct crypto_cipher *tfm;
u32 replays; /* dot11RSNAStatsCCMPReplays */
/* scratch buffers for virt_to_page() (crypto API) */
@@ -155,6 +143,4 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
-void ieee80211_key_todo(void);
-
#endif /* IEEE80211_KEY_H */
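The extra CCMP receive counter added above is reserved for robust management frames, while data frames keep using their per-queue counters. A minimal sketch of how the RX path is assumed to select the replay counter (the helper and its queue argument are illustrative; the real lookup lives in wpa.c):

static u8 *example_ccmp_rx_pn(struct ieee80211_key *key,
			      struct ieee80211_hdr *hdr, int queue)
{
	/* robust management frames use the extra, last counter */
	if (ieee80211_is_mgmt(hdr->frame_control))
		return key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES];

	/* data frames: queue is the TID-based RX queue, 0..NUM_RX_DATA_QUEUES-1 */
	return key->u.ccmp.rx_pn[queue];
}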
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22a384dfab65..edf7aff93268 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,6 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/pm_qos_params.h>
+#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -259,7 +260,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
struct sk_buff *skb;
- struct ieee80211_ra_tid *ra_tid;
while ((skb = skb_dequeue(&local->skb_queue)) ||
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -274,18 +274,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
skb->pkt_type = 0;
ieee80211_tx_status(local_to_hw(local), skb);
break;
- case IEEE80211_DELBA_MSG:
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
- ra_tid->tid);
- dev_kfree_skb(skb);
- break;
- case IEEE80211_ADDBA_MSG:
- ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
- ra_tid->tid);
- dev_kfree_skb(skb);
- break ;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
@@ -329,6 +317,76 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
mutex_unlock(&local->iflist_mtx);
}
+#ifdef CONFIG_INET
+static int ieee80211_ifa_changed(struct notifier_block *nb,
+ unsigned long data, void *arg)
+{
+ struct in_ifaddr *ifa = arg;
+ struct ieee80211_local *local =
+ container_of(nb, struct ieee80211_local,
+ ifa_notifier);
+ struct net_device *ndev = ifa->ifa_dev->dev;
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct in_device *idev;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ieee80211_if_managed *ifmgd;
+ int c = 0;
+
+ if (!netif_running(ndev))
+ return NOTIFY_DONE;
+
+ /* Make sure it's our interface that got changed */
+ if (!wdev)
+ return NOTIFY_DONE;
+
+ if (wdev->wiphy != local->hw.wiphy)
+ return NOTIFY_DONE;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
+ bss_conf = &sdata->vif.bss_conf;
+
+ /* ARP filtering is only supported in managed mode */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return NOTIFY_DONE;
+
+ idev = sdata->dev->ip_ptr;
+ if (!idev)
+ return NOTIFY_DONE;
+
+ ifmgd = &sdata->u.mgd;
+ mutex_lock(&ifmgd->mtx);
+
+ /* Copy the addresses to the bss_conf list */
+ ifa = idev->ifa_list;
+ while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
+ bss_conf->arp_addr_list[c] = ifa->ifa_address;
+ ifa = ifa->ifa_next;
+ c++;
+ }
+
+ /* If not all addresses fit the list, disable filtering */
+ if (ifa) {
+ sdata->arp_filter_state = false;
+ c = 0;
+ } else {
+ sdata->arp_filter_state = true;
+ }
+ bss_conf->arp_addr_cnt = c;
+
+ /* Configure driver only if associated */
+ if (ifmgd->associated) {
+ bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_ARP_FILTER);
+ }
+
+ mutex_unlock(&ifmgd->mtx);
+
+ return NOTIFY_DONE;
+}
+#endif
+
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops)
{
@@ -396,7 +454,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
mutex_init(&local->iflist_mtx);
mutex_init(&local->scan_mtx);
- spin_lock_init(&local->key_lock);
+ mutex_init(&local->key_mtx);
spin_lock_init(&local->filter_lock);
spin_lock_init(&local->queue_stop_reason_lock);
@@ -419,8 +477,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
sta_info_init(local);
- for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_head_init(&local->pending[i]);
+ atomic_set(&local->agg_queue_stop[i], 0);
+ }
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
@@ -431,8 +491,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
- spin_lock_init(&local->ampdu_lock);
-
return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -572,7 +630,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.conf.listen_interval = local->hw.max_listen_interval;
- local->hw.conf.dynamic_ps_forced_timeout = -1;
+ local->dynamic_ps_forced_timeout = -1;
result = sta_info_start(local);
if (result < 0)
@@ -612,14 +670,24 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_max_network_latency;
result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
&local->network_latency_notifier);
-
if (result) {
rtnl_lock();
goto fail_pm_qos;
}
+#ifdef CONFIG_INET
+ local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
+ result = register_inetaddr_notifier(&local->ifa_notifier);
+ if (result)
+ goto fail_ifa;
+#endif
+
return 0;
+ fail_ifa:
+ pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
+ &local->network_latency_notifier);
+ rtnl_lock();
fail_pm_qos:
ieee80211_led_exit(local);
ieee80211_remove_interfaces(local);
@@ -647,6 +715,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
&local->network_latency_notifier);
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&local->ifa_notifier);
+#endif
rtnl_lock();
@@ -704,6 +775,10 @@ static int __init ieee80211_init(void)
if (ret)
return ret;
+ ret = rc80211_minstrel_ht_init();
+ if (ret)
+ goto err_minstrel;
+
ret = rc80211_pid_init();
if (ret)
goto err_pid;
@@ -716,6 +791,8 @@ static int __init ieee80211_init(void)
err_netdev:
rc80211_pid_exit();
err_pid:
+ rc80211_minstrel_ht_exit();
+ err_minstrel:
rc80211_minstrel_exit();
return ret;
@@ -724,6 +801,7 @@ static int __init ieee80211_init(void)
static void __exit ieee80211_exit(void)
{
rc80211_pid_exit();
+ rc80211_minstrel_ht_exit();
rc80211_minstrel_exit();
/*
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index bde81031727a..c8a4f19ed13b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
/**
@@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- /* might restart the timer but that doesn't matter */
- cancel_work_sync(&ifmsh->work);
-
/* use atomic bitops in case both timers fire at the same time */
if (del_timer_sync(&ifmsh->housekeeping_timer))
@@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED |
@@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
* whether the interface is running, which, at this point,
* it no longer is.
*/
- cancel_work_sync(&sdata->u.mesh.work);
-
- /*
- * When we get here, the interface is marked down.
- * Call synchronize_rcu() to wait for the RX path
- * should it be using the interface and enqueuing
- * frames at this very time on another CPU.
- */
- rcu_barrier(); /* Wait for RX path and call_rcu()'s */
- skb_queue_purge(&sdata->u.mesh.skb_queue);
+ cancel_work_sync(&sdata->work);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
}
}
-static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_if_mesh *ifmsh;
@@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
break;
}
-
- kfree_skb(skb);
}
-static void ieee80211_mesh_work(struct work_struct *work)
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
- struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct sk_buff *skb;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- while ((skb = skb_dequeue(&ifmsh->skb_queue)))
- ieee80211_mesh_rx_queued_mgmt(sdata, skb);
if (ifmsh->preq_queue_len &&
time_after(jiffies,
@@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
if (ieee80211_vif_is_mesh(&sdata->vif))
- ieee80211_queue_work(&local->hw, &sdata->u.mesh.work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
rcu_read_unlock();
}
@@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
setup_timer(&ifmsh->housekeeping_timer,
ieee80211_mesh_housekeeping_timer,
(unsigned long) sdata);
- skb_queue_head_init(&sdata->u.mesh.skb_queue);
ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
@@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}
-
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_ACTION:
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- skb_queue_tail(&ifmsh->skb_queue, skb);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
- return RX_QUEUED;
- }
-
- return RX_CONTINUE;
-}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c88087f1cd0f..ebd3f1d9d889 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *stainfo, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 0705018d8d1e..829e08a657d0 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
- ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
else if (time_before(jiffies, ifmsh->last_preq)) {
/* avoid long wait if did not send preqs for a long time
* and jiffies wrapped around
*/
ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
- ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
} else
mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
min_preq_int_jiff(sdata));
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 181ffd6efd81..349e466cf08b 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
read_unlock(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
@@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
read_unlock(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
- ieee80211_queue_work(&local->hw, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f803f8b72a93..85c3ca33333e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -561,23 +561,19 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
beaconint_us = ieee80211_tu_to_usec(
found->vif.bss_conf.beacon_int);
- timeout = local->hw.conf.dynamic_ps_forced_timeout;
+ timeout = local->dynamic_ps_forced_timeout;
if (timeout < 0) {
/*
+ * Go to full PSM if the user configures a very low
+ * latency requirement.
* The 2 second value is there for compatibility until
* the PM_QOS_NETWORK_LATENCY is configured with real
* values.
*/
- if (latency == 2000000000)
- timeout = 100;
- else if (latency <= 50000)
- timeout = 300;
- else if (latency <= 100000)
- timeout = 100;
- else if (latency <= 500000)
- timeout = 50;
- else
+ if (latency > 1900000000 && latency != 2000000000)
timeout = 0;
+ else
+ timeout = 100;
}
local->hw.conf.dynamic_ps_timeout = timeout;
@@ -806,11 +802,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
bss_info_changed |= BSS_CHANGED_ASSOC;
/* set timing information */
- sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
- sdata->vif.bss_conf.timestamp = cbss->tsf;
+ bss_conf->beacon_int = cbss->beacon_interval;
+ bss_conf->timestamp = cbss->tsf;
bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -835,7 +832,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_led_assoc(local, 1);
- sdata->vif.bss_conf.assoc = 1;
+ bss_conf->assoc = 1;
/*
* For now just always ask the driver to update the basic rateset
* when we have associated, we aren't checking whether it actually
@@ -848,9 +845,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
/* Tell the driver to monitor connection quality (if supported) */
if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
- sdata->vif.bss_conf.cqm_rssi_thold)
+ bss_conf->cqm_rssi_thold)
bss_info_changed |= BSS_CHANGED_CQM;
+ /* Enable ARP filtering */
+ if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
+ bss_conf->arp_filter_enabled = sdata->arp_filter_state;
+ bss_info_changed |= BSS_CHANGED_ARP_FILTER;
+ }
+
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
mutex_lock(&local->iflist_mtx);
@@ -898,13 +901,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
netif_tx_stop_all_queues(sdata->dev);
netif_carrier_off(sdata->dev);
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
if (sta) {
- set_sta_flags(sta, WLAN_STA_DISASSOC);
+ set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
changed |= ieee80211_reset_erp_info(sdata);
@@ -932,6 +935,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, config_changed);
+ /* Disable ARP filtering */
+ if (sdata->vif.bss_conf.arp_filter_enabled) {
+ sdata->vif.bss_conf.arp_filter_enabled = false;
+ changed |= BSS_CHANGED_ARP_FILTER;
+ }
+
/* The BSSID (not really interesting) and HT changed */
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
@@ -1633,35 +1642,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata, changed);
}
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_mgmt *mgmt;
- u16 fc;
-
- if (skb->len < 24)
- return RX_DROP_MONITOR;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = le16_to_cpu(mgmt->frame_control);
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_DEAUTH:
- case IEEE80211_STYPE_DISASSOC:
- case IEEE80211_STYPE_ACTION:
- skb_queue_tail(&sdata->u.mgd.skb_queue, skb);
- ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
- return RX_QUEUED;
- }
-
- return RX_DROP_MONITOR;
-}
-
-static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
@@ -1693,44 +1675,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
break;
case IEEE80211_STYPE_ACTION:
switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_BACK: {
- struct ieee80211_local *local = sdata->local;
- int len = skb->len;
- struct sta_info *sta;
-
- rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->sa);
- if (!sta) {
- rcu_read_unlock();
- break;
- }
-
- local_bh_disable();
-
- switch (mgmt->u.action.u.addba_req.action_code) {
- case WLAN_ACTION_ADDBA_REQ:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.addba_req)))
- break;
- ieee80211_process_addba_request(local, sta, mgmt, len);
- break;
- case WLAN_ACTION_ADDBA_RESP:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.addba_resp)))
- break;
- ieee80211_process_addba_resp(local, sta, mgmt, len);
- break;
- case WLAN_ACTION_DELBA:
- if (len < (IEEE80211_MIN_ACTION_SIZE +
- sizeof(mgmt->u.action.u.delba)))
- break;
- ieee80211_process_delba(sdata, sta, mgmt, len);
- break;
- }
- local_bh_enable();
- rcu_read_unlock();
- break;
- }
case WLAN_CATEGORY_SPECTRUM_MGMT:
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
@@ -1754,7 +1698,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
default:
WARN(1, "unexpected: %d", rma);
}
- goto out;
+ return;
}
mutex_unlock(&ifmgd->mtx);
@@ -1799,8 +1743,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
}
- out:
- kfree_skb(skb);
}
static void ieee80211_sta_timer(unsigned long data)
@@ -1815,39 +1757,13 @@ static void ieee80211_sta_timer(unsigned long data)
return;
}
- ieee80211_queue_work(&local->hw, &ifmgd->work);
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
-static void ieee80211_sta_work(struct work_struct *work)
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd;
- struct sk_buff *skb;
-
- if (!ieee80211_sdata_running(sdata))
- return;
-
- if (local->scanning)
- return;
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
- return;
-
- /*
- * ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the the rest.
- */
- if (WARN(local->suspended, "STA MLME work scheduled while "
- "going to suspend\n"))
- return;
-
- ifmgd = &sdata->u.mgd;
-
- /* first process frames to avoid timing out while a frame is pending */
- while ((skb = skb_dequeue(&ifmgd->skb_queue)))
- ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* then process the rest of the work */
mutex_lock(&ifmgd->mtx);
@@ -1942,8 +1858,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.monitor_work);
/* and do all the other regular work too */
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.mgd.work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
}
@@ -1958,7 +1873,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
* time -- the code here is properly synchronised.
*/
- cancel_work_sync(&ifmgd->work);
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
if (del_timer_sync(&ifmgd->timer))
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1990,7 +1904,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd;
ifmgd = &sdata->u.mgd;
- INIT_WORK(&ifmgd->work, ieee80211_sta_work);
INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
INIT_WORK(&ifmgd->beacon_connection_loss_work,
@@ -2003,7 +1916,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
(unsigned long) sdata);
- skb_queue_head_init(&ifmgd->skb_queue);
ifmgd->flags = 0;
@@ -2153,6 +2065,7 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
wk->filter_ta);
return WORK_DONE_DESTROY;
}
+
mutex_unlock(&wk->sdata->u.mgd.mtx);
}
@@ -2282,14 +2195,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_work *wk;
- const u8 *bssid = req->bss->bssid;
+ u8 bssid[ETH_ALEN];
+ bool assoc_bss = false;
mutex_lock(&ifmgd->mtx);
+ memcpy(bssid, req->bss->bssid, ETH_ALEN);
if (ifmgd->associated == req->bss) {
- bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata, true);
+ ieee80211_set_disassoc(sdata, false);
mutex_unlock(&ifmgd->mtx);
+ assoc_bss = true;
} else {
bool not_auth_yet = false;
@@ -2335,6 +2250,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
req->reason_code, cookie,
!req->local_state_change);
+ if (assoc_bss)
+ sta_info_destroy_addr(sdata, bssid);
ieee80211_recalc_idle(sdata->local);
@@ -2379,41 +2296,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- const u8 *buf, size_t len, u64 *cookie)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct sk_buff *skb;
-
- /* Check that we are on the requested channel for transmission */
- if ((chan != local->tmp_channel ||
- channel_type != local->tmp_channel_type) &&
- (chan != local->oper_channel ||
- channel_type != local->_oper_channel_type))
- return -EBUSY;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- memcpy(skb_put(skb, len), buf, len);
-
- if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
- IEEE80211_SKB_CB(skb)->flags |=
- IEEE80211_TX_INTFL_DONT_ENCRYPT;
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
- skb->dev = sdata->dev;
- ieee80211_tx_skb(sdata, skb);
-
- *cookie = (unsigned long) skb;
- return 0;
-}
-
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
enum nl80211_cqm_rssi_threshold_event rssi_event,
gfp_t gfp)
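On the driver side, the BSS_CHANGED_ARP_FILTER notifications raised above arrive through the usual bss_info_changed() callback together with arp_filter_enabled, arp_addr_cnt and arp_addr_list. A hedged sketch of how a driver might consume them (no real driver code is implied; the hardware programming is left as comments):

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *info,
				     u32 changed)
{
	int i;

	if (!(changed & BSS_CHANGED_ARP_FILTER))
		return;

	if (!info->arp_filter_enabled) {
		/* clear the hardware ARP filter here */
		return;
	}

	for (i = 0; i < info->arp_addr_cnt; i++) {
		__be32 addr = info->arp_addr_list[i];

		/* program 'addr' (IPv4, network byte order) into the
		 * hardware ARP filter here */
		(void) addr;
	}
}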
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 75202b295a4e..d287fde0431d 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
list_for_each_entry(sdata, &local->interfaces, list)
ieee80211_disable_keys(sdata);
- /* Tear down aggregation sessions */
-
- rcu_read_lock();
-
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ /* tear down aggregation sessions and remove STAs */
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
set_sta_flags(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta);
}
- }
- rcu_read_unlock();
-
- /* remove STAs */
- mutex_lock(&local->sta_mtx);
- list_for_each_entry(sta, &local->sta_list, list) {
if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -72,6 +64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
/* remove all interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
+ cancel_work_sync(&sdata->work);
+
switch(sdata->vif.type) {
case NL80211_IFTYPE_STATION:
ieee80211_sta_quiesce(sdata);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 065a96190e32..168427b0ffdc 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -147,5 +147,18 @@ static inline void rc80211_minstrel_exit(void)
}
#endif
+#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
+extern int rc80211_minstrel_ht_init(void);
+extern void rc80211_minstrel_ht_exit(void);
+#else
+static inline int rc80211_minstrel_ht_init(void)
+{
+ return 0;
+}
+static inline void rc80211_minstrel_ht_exit(void)
+{
+}
+#endif
+
#endif /* IEEE80211_RATE_H */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
new file mode 100644
index 000000000000..7a04951fcb1f
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include "rate.h"
+#include "rc80211_minstrel.h"
+#include "rc80211_minstrel_ht.h"
+
+#define AVG_PKT_SIZE 1200
+#define SAMPLE_COLUMNS 10
+#define EWMA_LEVEL 75
+
+/* Number of bits for an average sized packet */
+#define MCS_NBITS (AVG_PKT_SIZE << 3)
+
+/* Number of symbols for a packet with (bps) bits per symbol */
+#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
+
+/* Transmission time for a packet containing (syms) symbols */
+#define MCS_SYMBOL_TIME(sgi, syms) \
+ (sgi ? \
+ ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \
+ (syms) << 2 /* syms * 4 us */ \
+ )
+
+/* Transmit duration for the raw data part of an average sized packet */
+#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
+
+/* MCS rate information for an MCS group */
+#define MCS_GROUP(_streams, _sgi, _ht40) { \
+ .streams = _streams, \
+ .flags = \
+ (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
+ (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
+ .duration = { \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
+ } \
+}
+
+/*
+ * To enable sufficiently targeted rate sampling, MCS rates are divided into
+ * groups, based on the number of streams and flags (HT40, SGI) that they
+ * use.
+ */
+const struct mcs_group minstrel_mcs_groups[] = {
+ MCS_GROUP(1, 0, 0),
+ MCS_GROUP(2, 0, 0),
+#if MINSTREL_MAX_STREAMS >= 3
+ MCS_GROUP(3, 0, 0),
+#endif
+
+ MCS_GROUP(1, 1, 0),
+ MCS_GROUP(2, 1, 0),
+#if MINSTREL_MAX_STREAMS >= 3
+ MCS_GROUP(3, 1, 0),
+#endif
+
+ MCS_GROUP(1, 0, 1),
+ MCS_GROUP(2, 0, 1),
+#if MINSTREL_MAX_STREAMS >= 3
+ MCS_GROUP(3, 0, 1),
+#endif
+
+ MCS_GROUP(1, 1, 1),
+ MCS_GROUP(2, 1, 1),
+#if MINSTREL_MAX_STREAMS >= 3
+ MCS_GROUP(3, 1, 1),
+#endif
+};
+
+static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
+
+/*
+ * Perform EWMA (Exponentially Weighted Moving Average) calculation
+ */
+static int
+minstrel_ewma(int old, int new, int weight)
+{
+ return (new * (100 - weight) + old * weight) / 100;
+}
+
+/*
+ * Look up an MCS group index based on mac80211 rate information
+ */
+static int
+minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
+{
+ int streams = (rate->idx / MCS_GROUP_RATES) + 1;
+ u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
+ if (minstrel_mcs_groups[i].streams != streams)
+ continue;
+ if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
+ continue;
+
+ return i;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static inline struct minstrel_rate_stats *
+minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
+{
+ return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
+}
+
+
+/*
+ * Recalculate success probabilities and counters for a rate using EWMA
+ */
+static void
+minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+{
+ if (unlikely(mr->attempts > 0)) {
+ mr->sample_skipped = 0;
+ mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
+ if (!mr->att_hist)
+ mr->probability = mr->cur_prob;
+ else
+ mr->probability = minstrel_ewma(mr->probability,
+ mr->cur_prob, EWMA_LEVEL);
+ mr->att_hist += mr->attempts;
+ mr->succ_hist += mr->success;
+ } else {
+ mr->sample_skipped++;
+ }
+ mr->last_success = mr->success;
+ mr->last_attempts = mr->attempts;
+ mr->success = 0;
+ mr->attempts = 0;
+}
+
+/*
+ * Calculate throughput based on the average A-MPDU length, taking into account
+ * the expected number of retransmissions and their expected length
+ */
+static void
+minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ int group, int rate)
+{
+ struct minstrel_rate_stats *mr;
+ unsigned int usecs;
+
+ mr = &mi->groups[group].rates[rate];
+
+ if (mr->probability < MINSTREL_FRAC(1, 10)) {
+ mr->cur_tp = 0;
+ return;
+ }
+
+ usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+ usecs += minstrel_mcs_groups[group].duration[rate];
+ mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
+}
+
+/*
+ * Update rate statistics and select new primary rates
+ *
+ * Rules for rate selection:
+ * - max_prob_rate must use only one stream, as a tradeoff between delivery
+ * probability and throughput during strong fluctuations
+ * - as long as the max prob rate has a probability of more than 3/4, pick
+ * higher throughput rates, even if the probability is a bit lower
+ */
+static void
+minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+{
+ struct minstrel_mcs_group_data *mg;
+ struct minstrel_rate_stats *mr;
+ int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
+ int group, i, index;
+
+ if (mi->ampdu_packets > 0) {
+ mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
+ MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
+ mi->ampdu_len = 0;
+ mi->ampdu_packets = 0;
+ }
+
+ mi->sample_slow = 0;
+ mi->sample_count = 0;
+ mi->max_tp_rate = 0;
+ mi->max_tp_rate2 = 0;
+ mi->max_prob_rate = 0;
+
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ cur_prob = 0;
+ cur_prob_tp = 0;
+ cur_tp = 0;
+ cur_tp2 = 0;
+
+ mg = &mi->groups[group];
+ if (!mg->supported)
+ continue;
+
+ mg->max_tp_rate = 0;
+ mg->max_tp_rate2 = 0;
+ mg->max_prob_rate = 0;
+ mi->sample_count++;
+
+ for (i = 0; i < MCS_GROUP_RATES; i++) {
+ if (!(mg->supported & BIT(i)))
+ continue;
+
+ mr = &mg->rates[i];
+ mr->retry_updated = false;
+ index = MCS_GROUP_RATES * group + i;
+ minstrel_calc_rate_ewma(mp, mr);
+ minstrel_ht_calc_tp(mp, mi, group, i);
+
+ if (!mr->cur_tp)
+ continue;
+
+ /* ignore the lowest rate of each single-stream group */
+ if (!i && minstrel_mcs_groups[group].streams == 1)
+ continue;
+
+ if ((mr->cur_tp > cur_prob_tp && mr->probability >
+ MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
+ mg->max_prob_rate = index;
+ cur_prob = mr->probability;
+ }
+
+ if (mr->cur_tp > cur_tp) {
+ swap(index, mg->max_tp_rate);
+ cur_tp = mr->cur_tp;
+ mr = minstrel_get_ratestats(mi, index);
+ }
+
+ if (index >= mg->max_tp_rate)
+ continue;
+
+ if (mr->cur_tp > cur_tp2) {
+ mg->max_tp_rate2 = index;
+ cur_tp2 = mr->cur_tp;
+ }
+ }
+ }
+
+ /* try to sample up to half of the available rates during each interval */
+ mi->sample_count *= 4;
+
+ cur_prob = 0;
+ cur_prob_tp = 0;
+ cur_tp = 0;
+ cur_tp2 = 0;
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ mg = &mi->groups[group];
+ if (!mg->supported)
+ continue;
+
+ mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
+ if (cur_prob_tp < mr->cur_tp &&
+ minstrel_mcs_groups[group].streams == 1) {
+ mi->max_prob_rate = mg->max_prob_rate;
+ cur_prob = mr->cur_prob;
+ }
+
+ mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
+ if (cur_tp < mr->cur_tp) {
+ mi->max_tp_rate = mg->max_tp_rate;
+ cur_tp = mr->cur_tp;
+ }
+
+ mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
+ if (cur_tp2 < mr->cur_tp) {
+ mi->max_tp_rate2 = mg->max_tp_rate2;
+ cur_tp2 = mr->cur_tp;
+ }
+ }
+
+ mi->stats_update = jiffies;
+}
+
+static bool
+minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
+{
+ if (!rate->count)
+ return false;
+
+ if (rate->idx < 0)
+ return false;
+
+ return !!(rate->flags & IEEE80211_TX_RC_MCS);
+}
+
+static void
+minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
+{
+ struct minstrel_mcs_group_data *mg;
+
+ for (;;) {
+ mi->sample_group++;
+ mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
+ mg = &mi->groups[mi->sample_group];
+
+ if (!mg->supported)
+ continue;
+
+ if (++mg->index >= MCS_GROUP_RATES) {
+ mg->index = 0;
+ if (++mg->column >= ARRAY_SIZE(sample_table))
+ mg->column = 0;
+ }
+ break;
+ }
+}
+
+static void
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, int *idx, bool primary)
+{
+ int group, orig_group;
+
+ orig_group = group = *idx / MCS_GROUP_RATES;
+ while (group > 0) {
+ group--;
+
+ if (!mi->groups[group].supported)
+ continue;
+
+ if (minstrel_mcs_groups[group].streams >
+ minstrel_mcs_groups[orig_group].streams)
+ continue;
+
+ if (primary)
+ *idx = mi->groups[group].max_tp_rate;
+ else
+ *idx = mi->groups[group].max_tp_rate2;
+ break;
+ }
+}
+
+static void
+minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ u16 tid;
+
+ if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ return;
+
+ ieee80211_start_tx_ba_session(pubsta, tid);
+}
+
+static void
+minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+ struct minstrel_ht_sta *mi = &msp->ht;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *ar = info->status.rates;
+ struct minstrel_rate_stats *rate, *rate2;
+ struct minstrel_priv *mp = priv;
+ bool last = false;
+ int group;
+ int i = 0;
+
+ if (!msp->is_ht)
+ return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ if (!info->status.ampdu_len) {
+ info->status.ampdu_ack_len = 1;
+ info->status.ampdu_len = 1;
+ }
+
+ mi->ampdu_packets++;
+ mi->ampdu_len += info->status.ampdu_len;
+
+ if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
+ mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
+ mi->sample_tries = 3;
+ mi->sample_count--;
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ mi->sample_packets += info->status.ampdu_len;
+ minstrel_next_sample_idx(mi);
+ }
+
+ for (i = 0; !last; i++) {
+ last = (i == IEEE80211_TX_MAX_RATES - 1) ||
+ !minstrel_ht_txstat_valid(&ar[i + 1]);
+
+ if (!minstrel_ht_txstat_valid(&ar[i]))
+ break;
+
+ group = minstrel_ht_get_group_idx(&ar[i]);
+ rate = &mi->groups[group].rates[ar[i].idx % 8];
+
+ if (last && (info->flags & IEEE80211_TX_STAT_ACK))
+ rate->success += info->status.ampdu_ack_len;
+
+ rate->attempts += ar[i].count * info->status.ampdu_len;
+ }
+
+ /*
+ * check for sudden death of spatial multiplexing,
+ * downgrade to a lower number of streams if necessary.
+ */
+ rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
+ if (rate->attempts > 30 &&
+ MINSTREL_FRAC(rate->success, rate->attempts) <
+ MINSTREL_FRAC(20, 100))
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);
+
+ rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
+ if (rate2->attempts > 30 &&
+ MINSTREL_FRAC(rate2->success, rate2->attempts) <
+ MINSTREL_FRAC(20, 100))
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);
+
+ if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
+ minstrel_ht_update_stats(mp, mi);
+ minstrel_aggr_check(mp, sta, skb);
+ }
+}
+
+static void
+minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ int index)
+{
+ struct minstrel_rate_stats *mr;
+ const struct mcs_group *group;
+ unsigned int tx_time, tx_time_rtscts, tx_time_data;
+ unsigned int cw = mp->cw_min;
+ unsigned int t_slot = 9; /* FIXME */
+ unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
+
+ mr = minstrel_get_ratestats(mi, index);
+ if (mr->probability < MINSTREL_FRAC(1, 10)) {
+ mr->retry_count = 1;
+ mr->retry_count_rtscts = 1;
+ return;
+ }
+
+ mr->retry_count = 2;
+ mr->retry_count_rtscts = 2;
+ mr->retry_updated = true;
+
+ group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+ tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
+ tx_time = 2 * (t_slot + mi->overhead + tx_time_data);
+ tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data);
+ do {
+ cw = (cw << 1) | 1;
+ cw = min(cw, mp->cw_max);
+ tx_time += cw + t_slot + mi->overhead;
+ tx_time_rtscts += cw + t_slot + mi->overhead_rtscts;
+ if (tx_time_rtscts < mp->segment_size)
+ mr->retry_count_rtscts++;
+ } while ((tx_time < mp->segment_size) &&
+ (++mr->retry_count < mp->max_retry));
+}
+
+
+static void
+minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ struct ieee80211_tx_rate *rate, int index,
+ struct ieee80211_tx_rate_control *txrc,
+ bool sample, bool rtscts)
+{
+ const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+ struct minstrel_rate_stats *mr;
+
+ mr = minstrel_get_ratestats(mi, index);
+ if (!mr->retry_updated)
+ minstrel_calc_retransmit(mp, mi, index);
+
+ if (mr->probability < MINSTREL_FRAC(20, 100))
+ rate->count = 2;
+ else if (rtscts)
+ rate->count = mr->retry_count_rtscts;
+ else
+ rate->count = mr->retry_count;
+
+ rate->flags = IEEE80211_TX_RC_MCS | group->flags;
+ if (txrc->short_preamble)
+ rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ if (txrc->rts || rtscts)
+ rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
+ rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
+}
+
+static inline int
+minstrel_get_duration(int index)
+{
+ const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+ return group->duration[index % MCS_GROUP_RATES];
+}
+
+static int
+minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+{
+ struct minstrel_rate_stats *mr;
+ struct minstrel_mcs_group_data *mg;
+ int sample_idx = 0;
+
+ if (mi->sample_wait > 0) {
+ mi->sample_wait--;
+ return -1;
+ }
+
+ if (!mi->sample_tries)
+ return -1;
+
+ mi->sample_tries--;
+ mg = &mi->groups[mi->sample_group];
+ sample_idx = sample_table[mg->column][mg->index];
+ mr = &mg->rates[sample_idx];
+ sample_idx += mi->sample_group * MCS_GROUP_RATES;
+
+ /*
+ * When not using MRR, do not sample if the probability is already
+ * higher than 95% to avoid wasting airtime
+ */
+ if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
+ goto next;
+
+ /*
+ * Make sure that lower rates get sampled only occasionally,
+ * if the link is working perfectly.
+ */
+ if (minstrel_get_duration(sample_idx) >
+ minstrel_get_duration(mi->max_tp_rate)) {
+ if (mr->sample_skipped < 10)
+ goto next;
+
+ if (mi->sample_slow++ > 2)
+ goto next;
+ }
+
+ return sample_idx;
+
+next:
+ minstrel_next_sample_idx(mi);
+ return -1;
+}
+
+static void
+minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
+ struct ieee80211_tx_rate *ar = info->status.rates;
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+ struct minstrel_ht_sta *mi = &msp->ht;
+ struct minstrel_priv *mp = priv;
+ int sample_idx;
+
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ if (!msp->is_ht)
+ return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
+
+ info->flags |= mi->tx_flags;
+ sample_idx = minstrel_get_sample_rate(mp, mi);
+ if (sample_idx >= 0) {
+ minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
+ txrc, true, false);
+ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
+ txrc, false, true);
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ } else {
+ minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
+ txrc, false, false);
+ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
+ txrc, false, true);
+ }
+ minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);
+
+ ar[3].count = 0;
+ ar[3].idx = -1;
+
+ mi->total_packets++;
+
+ /* wraparound */
+ if (mi->total_packets == ~0) {
+ mi->total_packets = 0;
+ mi->sample_packets = 0;
+ }
+}
+
+static void
+minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ enum nl80211_channel_type oper_chan_type)
+{
+ struct minstrel_priv *mp = priv;
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+ struct minstrel_ht_sta *mi = &msp->ht;
+ struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ struct ieee80211_local *local = hw_to_local(mp->hw);
+ u16 sta_cap = sta->ht_cap.cap;
+ int ack_dur;
+ int stbc;
+ int i;
+
+ /* fall back to the old minstrel for legacy stations */
+ if (sta && !sta->ht_cap.ht_supported) {
+ msp->is_ht = false;
+ memset(&msp->legacy, 0, sizeof(msp->legacy));
+ msp->legacy.r = msp->ratelist;
+ msp->legacy.sample_table = msp->sample_table;
+ return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
+ MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS);
+
+ msp->is_ht = true;
+ memset(mi, 0, sizeof(*mi));
+ mi->stats_update = jiffies;
+
+ ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
+ mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
+ mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
+
+ mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
+
+ /* When using MRR, sample more on the first attempt, without delay */
+ if (mp->has_mrr) {
+ mi->sample_count = 16;
+ mi->sample_wait = 0;
+ } else {
+ mi->sample_count = 8;
+ mi->sample_wait = 8;
+ }
+ mi->sample_tries = 4;
+
+ stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
+ IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
+
+ if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
+ mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+
+ if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
+ oper_chan_type != NL80211_CHAN_HT40PLUS)
+ sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
+ u16 req = 0;
+
+ mi->groups[i].supported = 0;
+ if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
+ if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ req |= IEEE80211_HT_CAP_SGI_40;
+ else
+ req |= IEEE80211_HT_CAP_SGI_20;
+ }
+
+ if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ if ((sta_cap & req) != req)
+ continue;
+
+ mi->groups[i].supported =
+ mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
+ }
+}
+
+static void
+minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct minstrel_priv *mp = priv;
+
+ minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
+}
+
+static void
+minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed, enum nl80211_channel_type oper_chan_type)
+{
+ minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
+}
+
+static void *
+minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct ieee80211_supported_band *sband;
+ struct minstrel_ht_sta_priv *msp;
+ struct minstrel_priv *mp = priv;
+ struct ieee80211_hw *hw = mp->hw;
+ int max_rates = 0;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = hw->wiphy->bands[i];
+ if (sband && sband->n_bitrates > max_rates)
+ max_rates = sband->n_bitrates;
+ }
+
+ msp = kzalloc(sizeof(*msp), gfp);
+ if (!msp)
+ return NULL;
+
+ msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
+ if (!msp->ratelist)
+ goto error;
+
+ msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
+ if (!msp->sample_table)
+ goto error1;
+
+ return msp;
+
+error1:
+ kfree(msp->ratelist);
+error:
+ kfree(msp);
+ return NULL;
+}
+
+static void
+minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+
+ kfree(msp->sample_table);
+ kfree(msp->ratelist);
+ kfree(msp);
+}
+
+static void *
+minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return mac80211_minstrel.alloc(hw, debugfsdir);
+}
+
+static void
+minstrel_ht_free(void *priv)
+{
+ mac80211_minstrel.free(priv);
+}
+
+static struct rate_control_ops mac80211_minstrel_ht = {
+ .name = "minstrel_ht",
+ .tx_status = minstrel_ht_tx_status,
+ .get_rate = minstrel_ht_get_rate,
+ .rate_init = minstrel_ht_rate_init,
+ .rate_update = minstrel_ht_rate_update,
+ .alloc_sta = minstrel_ht_alloc_sta,
+ .free_sta = minstrel_ht_free_sta,
+ .alloc = minstrel_ht_alloc,
+ .free = minstrel_ht_free,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
+ .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
+#endif
+};
+
+
+static void
+init_sample_table(void)
+{
+ int col, i, new_idx;
+ u8 rnd[MCS_GROUP_RATES];
+
+ memset(sample_table, 0xff, sizeof(sample_table));
+ for (col = 0; col < SAMPLE_COLUMNS; col++) {
+ for (i = 0; i < MCS_GROUP_RATES; i++) {
+ get_random_bytes(rnd, sizeof(rnd));
+ new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
+
+ while (sample_table[col][new_idx] != 0xff)
+ new_idx = (new_idx + 1) % MCS_GROUP_RATES;
+
+ sample_table[col][new_idx] = i;
+ }
+ }
+}
+
+int __init
+rc80211_minstrel_ht_init(void)
+{
+ init_sample_table();
+ return ieee80211_rate_control_register(&mac80211_minstrel_ht);
+}
+
+void
+rc80211_minstrel_ht_exit(void)
+{
+ ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
+}
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
new file mode 100644
index 000000000000..696c0fc6e0b7
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RC_MINSTREL_HT_H
+#define __RC_MINSTREL_HT_H
+
+/*
+ * The number of streams can be changed to 2 to reduce code
+ * size and memory footprint.
+ */
+#define MINSTREL_MAX_STREAMS 3
+#define MINSTREL_STREAM_GROUPS 4
+
+/* scaled fraction values */
+#define MINSTREL_SCALE 16
+#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
+#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
+
+#define MCS_GROUP_RATES 8
+
+struct mcs_group {
+ u32 flags;
+ unsigned int streams;
+ unsigned int duration[MCS_GROUP_RATES];
+};
+
+struct minstrel_rate_stats {
+ /* current / last sampling period attempts/success counters */
+ unsigned int attempts, last_attempts;
+ unsigned int success, last_success;
+
+ /* total attempts/success counters */
+ u64 att_hist, succ_hist;
+
+ /* current throughput */
+ unsigned int cur_tp;
+
+ /* packet delivery probabilities */
+ unsigned int cur_prob, probability;
+
+ /* maximum retry counts */
+ unsigned int retry_count;
+ unsigned int retry_count_rtscts;
+
+ bool retry_updated;
+ u8 sample_skipped;
+};
+
+struct minstrel_mcs_group_data {
+ u8 index;
+ u8 column;
+
+ /* bitfield of supported MCS rates of this group */
+ u8 supported;
+
+ /* selected primary rates */
+ unsigned int max_tp_rate;
+ unsigned int max_tp_rate2;
+ unsigned int max_prob_rate;
+
+ /* MCS rate statistics */
+ struct minstrel_rate_stats rates[MCS_GROUP_RATES];
+};
+
+struct minstrel_ht_sta {
+ /* ampdu length (average, per sampling interval) */
+ unsigned int ampdu_len;
+ unsigned int ampdu_packets;
+
+ /* ampdu length (EWMA) */
+ unsigned int avg_ampdu_len;
+
+ /* best throughput rate */
+ unsigned int max_tp_rate;
+
+ /* second best throughput rate */
+ unsigned int max_tp_rate2;
+
+ /* best probability rate */
+ unsigned int max_prob_rate;
+
+ /* time of last status update */
+ unsigned long stats_update;
+
+ /* overhead time in usec for each frame */
+ unsigned int overhead;
+ unsigned int overhead_rtscts;
+
+ unsigned int total_packets;
+ unsigned int sample_packets;
+
+ /* tx flags to add for frames for this sta */
+ u32 tx_flags;
+
+ u8 sample_wait;
+ u8 sample_tries;
+ u8 sample_count;
+ u8 sample_slow;
+
+ /* current MCS group to be sampled */
+ u8 sample_group;
+
+ /* MCS rate group info and statistics */
+ struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS];
+};
+
+struct minstrel_ht_sta_priv {
+ union {
+ struct minstrel_ht_sta ht;
+ struct minstrel_sta_info legacy;
+ };
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *dbg_stats;
+#endif
+ void *ratelist;
+ void *sample_table;
+ bool is_ht;
+};
+
+void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
+void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
+
+#endif
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
new file mode 100644
index 000000000000..4fb3ccbd8b40
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/debugfs.h>
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include "rc80211_minstrel.h"
+#include "rc80211_minstrel_ht.h"
+
+extern const struct mcs_group minstrel_mcs_groups[];
+
+static int
+minstrel_ht_stats_open(struct inode *inode, struct file *file)
+{
+ struct minstrel_ht_sta_priv *msp = inode->i_private;
+ struct minstrel_ht_sta *mi = &msp->ht;
+ struct minstrel_debugfs_info *ms;
+ unsigned int i, j, tp, prob, eprob;
+ char *p;
+ int ret;
+
+ if (!msp->is_ht) {
+ inode->i_private = &msp->legacy;
+ ret = minstrel_stats_open(inode, file);
+ inode->i_private = msp;
+ return ret;
+ }
+
+ ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
+ if (!ms)
+ return -ENOMEM;
+
+ file->private_data = ms;
+ p = ms->buf;
+ p += sprintf(p, "type rate throughput ewma prob this prob "
+ "this succ/attempt success attempts\n");
+ for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
+ char htmode = '2';
+ char gimode = 'L';
+
+ if (!mi->groups[i].supported)
+ continue;
+
+ if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ htmode = '4';
+ if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ gimode = 'S';
+
+ for (j = 0; j < MCS_GROUP_RATES; j++) {
+ struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
+ int idx = i * MCS_GROUP_RATES + j;
+
+ if (!(mi->groups[i].supported & BIT(j)))
+ continue;
+
+ p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
+
+ *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
+ *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
+ *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
+ p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
+ MCS_GROUP_RATES + j);
+
+ tp = mr->cur_tp / 10;
+ prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mr->probability * 1000);
+
+ p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
+ "%3u(%3u) %8llu %8llu\n",
+ tp / 10, tp % 10,
+ eprob / 10, eprob % 10,
+ prob / 10, prob % 10,
+ mr->last_success,
+ mr->last_attempts,
+ (unsigned long long)mr->succ_hist,
+ (unsigned long long)mr->att_hist);
+ }
+ }
+ p += sprintf(p, "\nTotal packet count:: ideal %d "
+ "lookaround %d\n",
+ max(0, (int) mi->total_packets - (int) mi->sample_packets),
+ mi->sample_packets);
+ p += sprintf(p, "Average A-MPDU length: %d.%d\n",
+ MINSTREL_TRUNC(mi->avg_ampdu_len),
+ MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
+ ms->len = p - ms->buf;
+
+ return 0;
+}
+
+static const struct file_operations minstrel_ht_stat_fops = {
+ .owner = THIS_MODULE,
+ .open = minstrel_ht_stats_open,
+ .read = minstrel_stats_read,
+ .release = minstrel_stats_release,
+};
+
+void
+minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
+{
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+
+ msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
+ &minstrel_ht_stat_fops);
+}
+
+void
+minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
+{
+ struct minstrel_ht_sta_priv *msp = priv_sta;
+
+ debugfs_remove(msp->dbg_stats);
+}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index be9abc2e6348..a8aa0f2411a2 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock(&sta->lock);
-
- if (!sta->ampdu_mlme.tid_active_rx[tid])
- goto dont_reorder_unlock;
-
- tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
+ goto dont_reorder;
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
- goto dont_reorder_unlock;
+ goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
/* if this mpdu is fragmented - terminate rx aggregation session */
sc = le16_to_cpu(hdr->seq_ctrl);
if (sc & IEEE80211_SCTL_FRAG) {
- spin_unlock(&sta->lock);
- __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
- WLAN_REASON_QSTA_REQUIRE_SETUP);
- dev_kfree_skb(skb);
+ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&rx->sdata->skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &rx->sdata->work);
return;
}
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
- spin_unlock(&sta->lock);
+ /*
+ * No locking needed -- we will only ever process one
+ * RX packet at a time, and thus own tid_agg_rx. All
+ * other code manipulating it needs to (and does) make
+ * sure that we cannot get to it any more before doing
+ * anything with it.
+ */
+ if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
return;
- }
- dont_reorder_unlock:
- spin_unlock(&sta->lock);
dont_reorder:
__skb_queue_tail(frames, skb);
}
@@ -825,6 +824,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
ieee80211_rx_result result = RX_DROP_UNUSABLE;
struct ieee80211_key *stakey = NULL;
int mmie_keyidx = -1;
+ __le16 fc;
/*
* Key selection 101
@@ -866,13 +866,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (rx->sta)
stakey = rcu_dereference(rx->sta->key);
- if (!ieee80211_has_protected(hdr->frame_control))
+ fc = hdr->frame_control;
+
+ if (!ieee80211_has_protected(fc))
mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
rx->key = stakey;
/* Skip decryption if the frame is not protected. */
- if (!ieee80211_has_protected(hdr->frame_control))
+ if (!ieee80211_has_protected(fc))
return RX_CONTINUE;
} else if (mmie_keyidx >= 0) {
/* Broadcast/multicast robust management frame / BIP */
@@ -884,7 +886,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
return RX_DROP_MONITOR; /* unexpected BIP keyidx */
rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
- } else if (!ieee80211_has_protected(hdr->frame_control)) {
+ } else if (!ieee80211_has_protected(fc)) {
/*
* The frame was not protected, so skip decryption. However, we
* need to set rx->key if there is a key that could have been
@@ -892,7 +894,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* have been expected.
*/
struct ieee80211_key *key = NULL;
- if (ieee80211_is_mgmt(hdr->frame_control) &&
+ if (ieee80211_is_mgmt(fc) &&
is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(rx->sdata->default_mgmt_key)))
rx->key = key;
@@ -914,7 +916,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
(status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ hdrlen = ieee80211_hdrlen(fc);
if (rx->skb->len < 8 + hdrlen)
return RX_DROP_UNUSABLE; /* TODO: count this? */
@@ -947,19 +949,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
-
- hdr = (struct ieee80211_hdr *)rx->skb->data;
-
- /* Check for weak IVs if possible */
- if (rx->sta && rx->key->conf.alg == ALG_WEP &&
- ieee80211_is_data(hdr->frame_control) &&
- (!(status->flag & RX_FLAG_IV_STRIPPED) ||
- !(status->flag & RX_FLAG_DECRYPTED)) &&
- ieee80211_wep_is_weak_iv(rx->skb, rx->key))
- rx->sta->wep_weak_iv_count++;
+ /* the hdr variable is invalid now! */
switch (rx->key->conf.alg) {
case ALG_WEP:
+ /* Check for weak IVs if possible */
+ if (rx->sta && ieee80211_is_data(fc) &&
+ (!(status->flag & RX_FLAG_IV_STRIPPED) ||
+ !(status->flag & RX_FLAG_DECRYPTED)) &&
+ ieee80211_wep_is_weak_iv(rx->skb, rx->key))
+ rx->sta->wep_weak_iv_count++;
+
result = ieee80211_crypto_wep_decrypt(rx);
break;
case ALG_TKIP:
@@ -1267,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
rx->queue, &(rx->skb));
if (rx->key && rx->key->conf.alg == ALG_CCMP &&
ieee80211_has_protected(fc)) {
+ int queue = ieee80211_is_mgmt(fc) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
/* Store CCMP PN so that we can verify that the next
* fragment has a sequential PN value. */
entry->ccmp = 1;
memcpy(entry->last_pn,
- rx->key->u.ccmp.rx_pn[rx->queue],
+ rx->key->u.ccmp.rx_pn[queue],
CCMP_PN_LEN);
}
return RX_QUEUED;
@@ -1291,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (entry->ccmp) {
int i;
u8 pn[CCMP_PN_LEN], *rpn;
+ int queue;
if (!rx->key || rx->key->conf.alg != ALG_CCMP)
return RX_DROP_UNUSABLE;
memcpy(pn, entry->last_pn, CCMP_PN_LEN);
@@ -1299,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (pn[i])
break;
}
- rpn = rx->key->u.ccmp.rx_pn[rx->queue];
+ queue = ieee80211_is_mgmt(fc) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
+ rpn = rx->key->u.ccmp.rx_pn[queue];
if (memcmp(pn, rpn, CCMP_PN_LEN))
return RX_DROP_UNUSABLE;
memcpy(entry->last_pn, pn, CCMP_PN_LEN);
@@ -1829,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
&bar_data, sizeof(bar_data)))
return RX_DROP_MONITOR;
- spin_lock(&rx->sta->lock);
tid = le16_to_cpu(bar_data.control) >> 12;
- if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
- spin_unlock(&rx->sta->lock);
+
+ tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
+ if (!tid_agg_rx)
return RX_DROP_MONITOR;
- }
- tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
@@ -1848,11 +1851,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
frames);
kfree_skb(skb);
- spin_unlock(&rx->sta->lock);
return RX_QUEUED;
}
- return RX_CONTINUE;
+ /*
+ * After this point, we only want management frames,
+ * so we can drop all remaining control frames to
+ * cooked monitor interfaces.
+ */
+ return RX_DROP_MONITOR;
}
static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
@@ -1944,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
break;
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
-
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.addba_req)))
- return RX_DROP_MONITOR;
- ieee80211_process_addba_request(local, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
case WLAN_ACTION_ADDBA_RESP:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.addba_resp)))
- break;
- ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
case WLAN_ACTION_DELBA:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.delba)))
- break;
- ieee80211_process_delba(sdata, rx->sta, mgmt, len);
- goto handled;
+ goto invalid;
+ break;
+ default:
+ goto invalid;
}
- break;
+
+ goto queue;
case WLAN_CATEGORY_SPECTRUM_MGMT:
if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
break;
@@ -1997,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
break;
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+ goto queue;
}
break;
case WLAN_CATEGORY_SA_QUERY:
@@ -2015,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
case WLAN_CATEGORY_MESH_PLINK:
case WLAN_CATEGORY_MESH_PATH_SEL:
- if (ieee80211_vif_is_mesh(&sdata->vif))
- return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
- break;
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ goto queue;
}
+ invalid:
/*
* For AP mode, hostapd is responsible for handling any action
* frames that we didn't handle, including returning unknown
@@ -2039,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
*/
status = IEEE80211_SKB_RXCB(rx->skb);
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- cfg80211_rx_action(rx->sdata->dev, status->freq,
+ if (cfg80211_rx_action(rx->sdata->dev, status->freq,
rx->skb->data, rx->skb->len,
GFP_ATOMIC))
goto handled;
@@ -2068,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
+
+ queue:
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
+ return RX_QUEUED;
}
static ieee80211_rx_result debug_noinline
@@ -2075,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
ieee80211_rx_result rxs;
+ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+ __le16 stype;
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
+ if (rx->skb->len < 24)
+ return RX_DROP_MONITOR;
+
if (ieee80211_drop_unencrypted_mgmt(rx))
return RX_DROP_UNUSABLE;
@@ -2086,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
if (rxs != RX_CONTINUE)
return rxs;
- if (ieee80211_vif_is_mesh(&sdata->vif))
- return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+ stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
+ if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_DROP_MONITOR;
+
+ switch (stype) {
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ /* process for all: mesh, mlme, ibss */
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ /* process only for station */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_DROP_MONITOR;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ /* process only for ibss */
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ return RX_DROP_MONITOR;
+ break;
+ default:
+ return RX_DROP_MONITOR;
+ }
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+ /* queue up frame and kick off work to process it */
+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_packets++;
- return RX_DROP_MONITOR;
+ return RX_QUEUED;
}
static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
@@ -2151,7 +2194,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
u8 rate_or_pad;
__le16 chan_freq;
__le16 chan_flags;
- } __attribute__ ((packed)) *rthdr;
+ } __packed *rthdr;
struct sk_buff *skb = rx->skb, *skb2;
struct net_device *prev_dev = NULL;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ba9360a475b0..67656cbf2b15 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
spin_lock_init(&sta->lock);
spin_lock_init(&sta->flaglock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+ INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ mutex_init(&sta->ampdu_mlme.mtx);
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
@@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
}
for (i = 0; i < STA_TID_NUM; i++) {
- /* timer_to_tid must be initialized with identity mapping to
- * enable session_timer's data differentiation. refer to
- * sta_rx_agg_session_timer_expired for useage */
+ /*
+ * timer_to_tid must be initialized with identity mapping
+ * to enable session_timer's data differentiation. See
+ * sta_rx_agg_session_timer_expired for usage.
+ */
sta->timer_to_tid[i] = i;
- /* tx */
- sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
- sta->ampdu_mlme.tid_tx[i] = NULL;
- sta->ampdu_mlme.addba_req_num[i] = 0;
}
skb_queue_head_init(&sta->ps_tx_buf);
skb_queue_head_init(&sta->tx_filtered);
@@ -648,14 +648,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
if (sta->key) {
ieee80211_key_free(sta->key);
- /*
- * We have only unlinked the key, and actually destroying it
- * may mean it is removed from hardware which requires that
- * the key->sta pointer is still valid, so flush the key todo
- * list here.
- */
- ieee80211_key_todo();
-
WARN_ON(sta->key);
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index df9d45544ca5..10d0fcb417ae 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,9 +42,6 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_DISASSOC: Disassociation in progress.
- * This is used to reject TX BA session requests when disassociation
- * is in progress.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
@@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags {
WLAN_STA_BLOCK_BA = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
- WLAN_STA_DISASSOC = 1<<14,
};
#define STA_TID_NUM 16
#define ADDBA_RESP_INTERVAL HZ
-#define HT_AGG_MAX_RETRIES (0x3)
+#define HT_AGG_MAX_RETRIES 0x3
-#define HT_AGG_STATE_INITIATOR_SHIFT (4)
-
-#define HT_ADDBA_REQUESTED_MSK BIT(0)
-#define HT_ADDBA_DRV_READY_MSK BIT(1)
-#define HT_ADDBA_RECEIVED_MSK BIT(2)
-#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3)
-#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
-#define HT_AGG_STATE_IDLE (0x0)
-#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
- HT_ADDBA_DRV_READY_MSK | \
- HT_ADDBA_RECEIVED_MSK)
+#define HT_AGG_STATE_DRV_READY 0
+#define HT_AGG_STATE_RESPONSE_RECEIVED 1
+#define HT_AGG_STATE_OPERATIONAL 2
+#define HT_AGG_STATE_STOPPING 3
+#define HT_AGG_STATE_WANT_START 4
+#define HT_AGG_STATE_WANT_STOP 5
/**
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
+ * @rcu_head: rcu head for freeing structure
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
- * @ssn: Starting Sequence Number expected to be aggregated.
* @dialog_token: dialog token for aggregation session
+ * @state: session state (see above)
+ * @stop_initiator: initiator of a session stop
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock; only the TX path can access it under RCU
+ * lock-free if, and only if, the state has the flag
+ * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
+ * must also acquire the spinlock and re-check the state;
+ * see comments in the tx code touching it.
*/
struct tid_ampdu_tx {
+ struct rcu_head rcu_head;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
- u16 ssn;
+ unsigned long state;
u8 dialog_token;
+ u8 stop_initiator;
};
/**
@@ -106,8 +109,18 @@ struct tid_ampdu_tx {
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
* @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ *
+ * This structure is protected by RCU and the per-station
+ * spinlock. Assignments to the array holding it must hold
+ * the spinlock; only the RX path can access it under RCU
+ * lock-free. The RX path, since it is single-threaded,
+ * can even modify the structure without locking since the
+ * only other modifications to it are done when the struct
+ * can not yet or no longer be found by the RX path.
*/
struct tid_ampdu_rx {
+ struct rcu_head rcu_head;
struct sk_buff **reorder_buf;
unsigned long *reorder_time;
struct timer_list session_timer;
@@ -120,6 +133,32 @@ struct tid_ampdu_rx {
};
/**
+ * struct sta_ampdu_mlme - STA aggregation information.
+ *
+ * @tid_rx: aggregation info for Rx per TID -- RCU protected
+ * @tid_tx: aggregation info for Tx per TID
+ * @addba_req_num: number of times addBA request has been sent.
+ * @dialog_token_allocator: dialog token enumerator for each new session;
+ * @work: work struct for starting/stopping aggregation
+ * @tid_rx_timer_expired: bitmap indicating on which TIDs the
+ * RX timer expired until the work for it runs
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ * to tid_tx[idx], which are protected by the sta spinlock)
+ */
+struct sta_ampdu_mlme {
+ struct mutex mtx;
+ /* rx */
+ struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
+ /* tx */
+ struct work_struct work;
+ struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
+ u8 addba_req_num[STA_TID_NUM];
+ u8 dialog_token_allocator;
+};
+
+
+/**
* enum plink_state - state of a mesh peer link finite state machine
*
* @PLINK_LISTEN: initial state, considered the implicit state of non existant
@@ -143,28 +182,6 @@ enum plink_state {
};
/**
- * struct sta_ampdu_mlme - STA aggregation information.
- *
- * @tid_active_rx: TID's state in Rx session state machine.
- * @tid_rx: aggregation info for Rx per TID
- * @tid_state_tx: TID's state in Tx session state machine.
- * @tid_tx: aggregation info for Tx per TID
- * @addba_req_num: number of times addBA request has been sent.
- * @dialog_token_allocator: dialog token enumerator for each new session;
- */
-struct sta_ampdu_mlme {
- /* rx */
- bool tid_active_rx[STA_TID_NUM];
- struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
- /* tx */
- u8 tid_state_tx[STA_TID_NUM];
- struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
- u8 addba_req_num[STA_TID_NUM];
- u8 dialog_token_allocator;
-};
-
-
-/**
* struct sta_info - STA information
*
* This structure collects information about a station that
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 94613af009f3..34da67995d94 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -47,7 +47,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
/*
* This skb 'survived' a round-trip through the driver, and
* hopefully the driver didn't mangle it too badly. However,
- * we can definitely not rely on the the control information
+ * we can definitely not rely on the control information
* being correct. Clear it so we don't get junk there, and
* indicate that it needs new processing, but must not be
* modified/encrypted again.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7093db..698d4718b1a4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1092,6 +1092,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
return true;
}
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct tid_ampdu_tx *tid_tx,
+ int tid)
+{
+ bool queued = false;
+
+ if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+ /*
+ * nothing -- this aggregation session is being started
+ * but that might still fail with the driver
+ */
+ } else {
+ spin_lock(&tx->sta->lock);
+ /*
+ * Need to re-check now, because we may get here
+ *
+ * 1) in the window during which the setup is actually
+ * already done, but not marked yet because not all
+ * packets are spliced over to the driver pending
+ * queue yet -- if this happened we acquire the lock
+ * either before or after the splice happens, but
+ * need to recheck which of these cases happened.
+ *
+ * 2) during session teardown, if the OPERATIONAL bit
+ * was cleared due to the teardown but the pointer
+ * hasn't been assigned NULL yet (or we loaded it
+ * before it was assigned) -- in this case it may
+ * now be NULL which means we should just let the
+ * packet pass through because splicing the frames
+ * back is already done.
+ */
+ tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+ if (!tid_tx) {
+ /* do nothing, let packet pass through */
+ } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ } else {
+ queued = true;
+ info->control.vif = &tx->sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ __skb_queue_tail(&tid_tx->pending, skb);
+ }
+ spin_unlock(&tx->sta->lock);
+ }
+
+ return queued;
+}
+
/*
* initialises @tx
*/
@@ -1104,8 +1157,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, tid;
- u8 *qc, *state;
- bool queued = false;
+ u8 *qc;
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
@@ -1157,35 +1209,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- spin_lock(&tx->sta->lock);
- /*
- * XXX: This spinlock could be fairly expensive, but see the
- * comment in agg-tx.c:ieee80211_agg_tx_operational().
- * One way to solve this would be to do something RCU-like
- * for managing the tid_tx struct and using atomic bitops
- * for the actual state -- by introducing an actual
- * 'operational' bit that would be possible. It would
- * require changing ieee80211_agg_tx_operational() to
- * set that bit, and changing the way tid_tx is managed
- * everywhere, including races between that bit and
- * tid_tx going away (tid_tx being added can be easily
- * committed to memory before the 'operational' bit).
- */
- tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
- state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
- if (*state == HT_AGG_STATE_OPERATIONAL) {
- info->flags |= IEEE80211_TX_CTL_AMPDU;
- } else if (*state != HT_AGG_STATE_IDLE) {
- /* in progress */
- queued = true;
- info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- __skb_queue_tail(&tid_tx->pending, skb);
- }
- spin_unlock(&tx->sta->lock);
+ tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+ if (tid_tx) {
+ bool queued;
- if (unlikely(queued))
- return TX_QUEUED;
+ queued = ieee80211_tx_prep_agg(tx, skb, info,
+ tid_tx, tid);
+
+ if (unlikely(queued))
+ return TX_QUEUED;
+ }
}
if (is_multicast_ether_addr(hdr->addr1)) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5b79d552780a..a54cf146ed50 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1138,18 +1138,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
mutex_unlock(&local->sta_mtx);
- /* Clear Suspend state so that ADDBA requests can be processed */
-
- rcu_read_lock();
-
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
- }
- }
-
- rcu_read_unlock();
-
/* setup RTS threshold */
drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
@@ -1202,13 +1190,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
- rcu_read_lock();
+ /*
+ * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
+ * sessions can be established after a resume.
+ *
+ * Also tear down aggregation sessions since reconfiguring
+ * them in a hardware restart scenario is not easily done
+ * right now, and the hardware will have lost information
+ * about the sessions, but we and the AP still think they
+ * are active. This is really a workaround though.
+ */
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ mutex_lock(&local->sta_mtx);
+
+ list_for_each_entry(sta, &local->sta_list, list) {
ieee80211_sta_tear_down_BA_sessions(sta);
+ clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
}
+
+ mutex_unlock(&local->sta_mtx);
}
- rcu_read_unlock();
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index b025dc7bb0fd..c22a71c5cb45 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -840,7 +840,7 @@ static void ieee80211_work_work(struct work_struct *work)
/*
* ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the the rest.
+ * here we'll pick the rest.
*/
if (WARN(local->suspended, "work scheduled while going to suspend\n"))
return;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0adbcc941ac9..a14e67707476 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -436,6 +436,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[CCMP_PN_LEN];
int data_len;
+ int queue;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -453,7 +454,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
ccmp_hdr2pn(pn, skb->data + hdrlen);
- if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) {
+ queue = ieee80211_is_mgmt(hdr->frame_control) ?
+ NUM_RX_DATA_QUEUES : rx->queue;
+
+ if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
key->u.ccmp.replays++;
return RX_DROP_UNUSABLE;
}
@@ -470,7 +474,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
- memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN);
+ memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
/* Remove CCMP header and MIC */
skb_trim(skb, skb->len - CCMP_MIC_LEN);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8593a77cfea9..413ed24a968a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -424,6 +424,18 @@ config NETFILTER_XT_TARGET_HL
since you can easily create immortal packets that loop
forever on the network.
+config NETFILTER_XT_TARGET_IDLETIMER
+ tristate "IDLETIMER target support"
+ depends on NETFILTER_ADVANCED
+ help
+
+ This option adds the `IDLETIMER' target. Each matching packet
+ resets the timer associated with the label specified when the rule
+ is added. When the timer expires, it triggers a sysfs notification.
+ The remaining time until expiration can be read via sysfs.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_LED
tristate '"LED" target support'
depends on LEDS_CLASS && LEDS_TRIGGERS
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 14e3a8fd8180..e28420aac5ef 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
# matches
obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 93c15a107b2c..02b078e11cf3 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -90,10 +90,10 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
&dest->addr.ip);
return NULL;
}
- __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
+ __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst));
IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
&dest->addr.ip,
- atomic_read(&rt->u.dst.__refcnt), rtos);
+ atomic_read(&rt->dst.__refcnt), rtos);
}
spin_unlock(&dest->dst_lock);
} else {
@@ -148,10 +148,10 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
&dest->addr.in6);
return NULL;
}
- __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
+ __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst));
IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
&dest->addr.in6,
- atomic_read(&rt->u.dst.__refcnt));
+ atomic_read(&rt->dst.__refcnt));
}
spin_unlock(&dest->dst_lock);
} else {
@@ -198,7 +198,7 @@ do { \
(skb)->ipvs_property = 1; \
skb_forward_csum(skb); \
NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
- (rt)->u.dst.dev, dst_output); \
+ (rt)->dst.dev, dst_output); \
} while (0)
@@ -245,7 +245,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
}
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -265,7 +265,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
@@ -309,9 +309,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
}
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if (skb->len > mtu) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -323,13 +323,13 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
*/
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(skb == NULL)) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
return NF_STOLEN;
}
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
@@ -376,7 +376,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -388,12 +388,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!skb_make_writable(skb, sizeof(struct iphdr)))
goto tx_error_put;
- if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+ if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error_put;
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -452,9 +452,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if (skb->len > mtu) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL_PKT(0, pp, skb, 0,
"ip_vs_nat_xmit_v6(): frag needed for");
@@ -465,12 +465,12 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
goto tx_error_put;
- if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+ if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error_put;
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -498,7 +498,7 @@ tx_error:
kfree_skb(skb);
return NF_STOLEN;
tx_error_put:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
goto tx_error;
}
#endif
@@ -549,9 +549,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
goto tx_error_icmp;
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
- mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
+ mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
@@ -601,7 +601,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/*
* Push down and install the IPIP header.
@@ -615,7 +615,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
iph->ttl = old_iph->ttl;
- ip_select_ident(iph, &rt->u.dst, NULL);
+ ip_select_ident(iph, &rt->dst, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
@@ -660,12 +660,12 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!rt)
goto tx_error_icmp;
- tdev = rt->u.dst.dev;
+ tdev = rt->dst.dev;
- mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
+ mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
/* TODO IPv6: do we need this check in IPv6? */
if (mtu < 1280) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
goto tx_error;
}
@@ -674,7 +674,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -689,7 +689,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct sk_buff *new_skb =
skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
kfree_skb(skb);
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
@@ -707,7 +707,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/*
* Push down and install the IPIP header.
@@ -760,7 +760,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
@@ -780,7 +780,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
@@ -813,10 +813,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if (skb->len > mtu) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -827,13 +827,13 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
*/
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(skb == NULL)) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
return NF_STOLEN;
}
/* drop old route */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
@@ -888,7 +888,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
@@ -900,12 +900,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!skb_make_writable(skb, offset))
goto tx_error_put;
- if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+ if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error_put;
/* drop the old route when skb is not shared */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
ip_vs_nat_icmp(skb, pp, cp, 0);
@@ -963,9 +963,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_icmp;
/* MTU checking */
- mtu = dst_mtu(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
if (skb->len > mtu) {
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -975,12 +975,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!skb_make_writable(skb, offset))
goto tx_error_put;
- if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
+ if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error_put;
/* drop the old route when skb is not shared */
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
+ skb_dst_set(skb, &rt->dst);
ip_vs_nat_icmp_v6(skb, pp, cp, 0);
@@ -1001,7 +1001,7 @@ out:
LeaveFunction(10);
return rc;
tx_error_put:
- dst_release(&rt->u.dst);
+ dst_release(&rt->dst);
goto tx_error;
}
#endif
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eeeb8bc73982..16b41b4e2a3c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
-struct nf_conn nf_conntrack_untracked __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
@@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
/* Don't set timer yet: wait for confirmation */
setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
-#ifdef CONFIG_NET_NS
- ct->ct_net = net;
-#endif
+ write_pnet(&ct->ct_net, net);
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
@@ -1183,10 +1181,21 @@ static void nf_ct_release_dying_list(struct net *net)
spin_unlock_bh(&nf_conntrack_lock);
}
+static int untrack_refs(void)
+{
+ int cnt = 0, cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+ cnt += atomic_read(&ct->ct_general.use) - 1;
+ }
+ return cnt;
+}
+
static void nf_conntrack_cleanup_init_net(void)
{
- /* wait until all references to nf_conntrack_untracked are dropped */
- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+ while (untrack_refs() > 0)
schedule();
nf_conntrack_helper_fini();
@@ -1321,10 +1330,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
+void nf_ct_untracked_status_or(unsigned long bits)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(nf_conntrack_untracked, cpu).status |= bits;
+}
+EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
+
static int nf_conntrack_init_init_net(void)
{
int max_factor = 8;
- int ret;
+ int ret, cpu;
/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
* machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1363,13 +1381,13 @@ static int nf_conntrack_init_init_net(void)
goto err_extend;
#endif
/* Set up fake conntrack: to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
- nf_conntrack_untracked.ct_net = &init_net;
-#endif
- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+ for_each_possible_cpu(cpu) {
+ struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+ write_pnet(&ct->ct_net, &init_net);
+ atomic_set(&ct->ct_general.use, 1);
+ }
/* - and look it like as a confirmed connection */
- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
+ nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
return 0;
#ifdef CONFIG_NF_CONNTRACK_ZONES
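The per-CPU conversion above gives every CPU its own untracked conntrack template, so cleanup has to sum the per-CPU reference counts before the module can go away. Below is a user-space analog of that aggregation; the names are made up and C11 atomics stand in for atomic_t.

/* User-space analog of the per-CPU refcount aggregation used by
 * untrack_refs() above: one counter per CPU, each initialised to 1,
 * so the number of outstanding external references is the sum of
 * (count - 1) over all CPUs.  Illustrative only. */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_CPUS 64

static atomic_int untracked_use[MAX_CPUS];
static long ncpus;

static void untracked_init(void)
{
	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 1 || ncpus > MAX_CPUS)
		ncpus = MAX_CPUS;
	for (long cpu = 0; cpu < ncpus; cpu++)
		atomic_store(&untracked_use[cpu], 1);	/* base reference */
}

static int untrack_refs(void)
{
	int cnt = 0;

	for (long cpu = 0; cpu < ncpus; cpu++)
		cnt += atomic_load(&untracked_use[cpu]) - 1;
	return cnt;
}

int main(void)
{
	untracked_init();
	atomic_fetch_add(&untracked_use[0], 1);	/* simulate a taken ref */
	printf("outstanding refs: %d\n", untrack_refs());
	atomic_fetch_sub(&untracked_use[0], 1);	/* and its release */
	printf("outstanding refs: %d\n", untrack_refs());
	return 0;
}
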
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 6eaee7c8a337..b969025cf82f 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -734,11 +734,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
if (rt1->rt_gateway == rt2->rt_gateway &&
- rt1->u.dst.dev == rt2->u.dst.dev)
+ rt1->dst.dev == rt2->dst.dev)
ret = 1;
- dst_release(&rt2->u.dst);
+ dst_release(&rt2->dst);
}
- dst_release(&rt1->u.dst);
+ dst_release(&rt1->dst);
}
break;
}
@@ -753,11 +753,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
sizeof(rt1->rt6i_gateway)) &&
- rt1->u.dst.dev == rt2->u.dst.dev)
+ rt1->dst.dev == rt2->dst.dev)
ret = 1;
- dst_release(&rt2->u.dst);
+ dst_release(&rt2->dst);
}
- dst_release(&rt1->u.dst);
+ dst_release(&rt1->dst);
}
break;
}
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 497b2224536f..aadde018a072 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -61,7 +61,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
goto out;
rcu_read_lock();
- in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
if (in_dev != NULL) {
for_primary_ifa(in_dev) {
if (ifa->ifa_broadcast == iph->daddr) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c42ff6aa441d..5bae1cd15eea 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
int err;
/* ignore our fake conntrack entry */
- if (ct == &nf_conntrack_untracked)
+ if (nf_ct_is_untracked(ct))
return 0;
if (events & (1 << IPCT_DESTROY)) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9dd8cd4fb6e6..802dbffae8b4 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -736,27 +736,19 @@ static bool tcp_in_window(const struct nf_conn *ct,
return res;
}
-#define TH_FIN 0x01
-#define TH_SYN 0x02
-#define TH_RST 0x04
-#define TH_PUSH 0x08
-#define TH_ACK 0x10
-#define TH_URG 0x20
-#define TH_ECE 0x40
-#define TH_CWR 0x80
-
/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
-static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
+static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
+ TCPHDR_URG) + 1] =
{
- [TH_SYN] = 1,
- [TH_SYN|TH_URG] = 1,
- [TH_SYN|TH_ACK] = 1,
- [TH_RST] = 1,
- [TH_RST|TH_ACK] = 1,
- [TH_FIN|TH_ACK] = 1,
- [TH_FIN|TH_ACK|TH_URG] = 1,
- [TH_ACK] = 1,
- [TH_ACK|TH_URG] = 1,
+ [TCPHDR_SYN] = 1,
+ [TCPHDR_SYN|TCPHDR_URG] = 1,
+ [TCPHDR_SYN|TCPHDR_ACK] = 1,
+ [TCPHDR_RST] = 1,
+ [TCPHDR_RST|TCPHDR_ACK] = 1,
+ [TCPHDR_FIN|TCPHDR_ACK] = 1,
+ [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
+ [TCPHDR_ACK] = 1,
+ [TCPHDR_ACK|TCPHDR_URG] = 1,
};
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
@@ -803,7 +795,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
}
/* Check TCP flags. */
- tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH));
+ tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
if (LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
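The table above encodes which TCP flag combinations are acceptable, after masking out PSH, ECE and CWR, which are always allowed. A self-contained sketch of the same lookup follows, using local flag constants rather than the kernel's TCPHDR_* macros.

/* Sketch of the flag-validity table above; the constants match the TCP
 * header flag byte.  PSH, ECE and CWR are masked off before the lookup
 * because any combination of them is acceptable.  Stand-alone example,
 * not the conntrack code itself. */
#include <stdio.h>

#define F_FIN 0x01
#define F_SYN 0x02
#define F_RST 0x04
#define F_PSH 0x08
#define F_ACK 0x10
#define F_URG 0x20
#define F_ECE 0x40
#define F_CWR 0x80

static const unsigned char valid[(F_FIN|F_SYN|F_RST|F_ACK|F_URG) + 1] = {
	[F_SYN]			= 1,
	[F_SYN|F_URG]		= 1,
	[F_SYN|F_ACK]		= 1,
	[F_RST]			= 1,
	[F_RST|F_ACK]		= 1,
	[F_FIN|F_ACK]		= 1,
	[F_FIN|F_ACK|F_URG]	= 1,
	[F_ACK]			= 1,
	[F_ACK|F_URG]		= 1,
};

static int flags_valid(unsigned char flag_byte)
{
	return valid[flag_byte & ~(F_ECE | F_CWR | F_PSH)];
}

int main(void)
{
	printf("SYN|ACK:      %d\n", flags_valid(F_SYN | F_ACK));
	printf("SYN|ACK|PSH:  %d\n", flags_valid(F_SYN | F_ACK | F_PSH));
	printf("SYN|FIN:      %d\n", flags_valid(F_SYN | F_FIN));
	return 0;
}
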
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fc9a211e629e..6a1572b0ab41 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@ struct nfulnl_instance {
u_int16_t group_num; /* number of this queue */
u_int16_t flags;
u_int8_t copy_mode;
+ struct rcu_head rcu;
};
-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
static atomic_t global_seq;
#define INSTANCE_BUCKETS 16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
struct nfulnl_instance *inst;
head = &instance_table[instance_hashfn(group_num)];
- hlist_for_each_entry(inst, pos, head, hlist) {
+ hlist_for_each_entry_rcu(inst, pos, head, hlist) {
if (inst->group_num == group_num)
return inst;
}
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
{
struct nfulnl_instance *inst;
- read_lock_bh(&instances_lock);
+ rcu_read_lock_bh();
inst = __instance_lookup(group_num);
- if (inst)
- instance_get(inst);
- read_unlock_bh(&instances_lock);
+ if (inst && !atomic_inc_not_zero(&inst->use))
+ inst = NULL;
+ rcu_read_unlock_bh();
return inst;
}
+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct nfulnl_instance, rcu));
+ module_put(THIS_MODULE);
+}
+
static void
instance_put(struct nfulnl_instance *inst)
{
- if (inst && atomic_dec_and_test(&inst->use)) {
- kfree(inst);
- module_put(THIS_MODULE);
- }
+ if (inst && atomic_dec_and_test(&inst->use))
+ call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
struct nfulnl_instance *inst;
int err;
- write_lock_bh(&instances_lock);
+ spin_lock_bh(&instances_lock);
if (__instance_lookup(group_num)) {
err = -EEXIST;
goto out_unlock;
@@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid)
inst->copy_mode = NFULNL_COPY_PACKET;
inst->copy_range = NFULNL_COPY_RANGE_MAX;
- hlist_add_head(&inst->hlist,
+ hlist_add_head_rcu(&inst->hlist,
&instance_table[instance_hashfn(group_num)]);
- write_unlock_bh(&instances_lock);
+ spin_unlock_bh(&instances_lock);
return inst;
out_unlock:
- write_unlock_bh(&instances_lock);
+ spin_unlock_bh(&instances_lock);
return ERR_PTR(err);
}
static void __nfulnl_flush(struct nfulnl_instance *inst);
+/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
/* first pull it out of the global list */
- hlist_del(&inst->hlist);
+ hlist_del_rcu(&inst->hlist);
/* then flush all pending packets from skb */
- spin_lock_bh(&inst->lock);
+ spin_lock(&inst->lock);
+
+ /* lockless readers won't be able to use us */
+ inst->copy_mode = NFULNL_COPY_DISABLED;
+
if (inst->skb)
__nfulnl_flush(inst);
- spin_unlock_bh(&inst->lock);
+ spin_unlock(&inst->lock);
/* and finally put the refcount */
instance_put(inst);
@@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst)
static inline void
instance_destroy(struct nfulnl_instance *inst)
{
- write_lock_bh(&instances_lock);
+ spin_lock_bh(&instances_lock);
__instance_destroy(inst);
- write_unlock_bh(&instances_lock);
+ spin_unlock_bh(&instances_lock);
}
static int
@@ -403,8 +413,9 @@ __build_packet_message(struct nfulnl_instance *inst,
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
- htonl(indev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex));
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
@@ -430,8 +441,9 @@ __build_packet_message(struct nfulnl_instance *inst,
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
- htonl(outdev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
} else {
/* Case 2: indev is a bridge group, we need to look
* for physical device (when called from ipv4) */
@@ -619,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf,
size += nla_total_size(data_len);
break;
+ case NFULNL_COPY_DISABLED:
default:
goto unlock_and_release;
}
@@ -672,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
int i;
/* destroy all instances for this pid */
- write_lock_bh(&instances_lock);
+ spin_lock_bh(&instances_lock);
for (i = 0; i < INSTANCE_BUCKETS; i++) {
struct hlist_node *tmp, *t2;
struct nfulnl_instance *inst;
@@ -684,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
__instance_destroy(inst);
}
}
- write_unlock_bh(&instances_lock);
+ spin_unlock_bh(&instances_lock);
}
return NOTIFY_DONE;
}
@@ -861,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
if (!hlist_empty(&instance_table[st->bucket]))
- return instance_table[st->bucket].first;
+ return rcu_dereference_bh(instance_table[st->bucket].first);
}
return NULL;
}
static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
- h = h->next;
+ h = rcu_dereference_bh(h->next);
while (!h) {
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
- h = instance_table[st->bucket].first;
+ h = rcu_dereference_bh(instance_table[st->bucket].first);
}
return h;
}
@@ -890,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
}
static void *seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(instances_lock)
+ __acquires(rcu_bh)
{
- read_lock_bh(&instances_lock);
+ rcu_read_lock_bh();
return get_idx(seq->private, *pos);
}
@@ -903,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void seq_stop(struct seq_file *s, void *v)
- __releases(instances_lock)
+ __releases(rcu_bh)
{
- read_unlock_bh(&instances_lock);
+ rcu_read_unlock_bh();
}
static int seq_show(struct seq_file *s, void *v)
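instance_lookup_get() above may race with __instance_destroy(), so it only takes a reference while the count is still non-zero; atomic_inc_not_zero() is the primitive that makes that safe under RCU. A user-space sketch of that primitive with C11 atomics follows; the RCU-deferred free itself is not modelled.

/* "Get a reference only if the object is still live": refuse to
 * resurrect a refcount that has already dropped to zero. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_inc_not_zero(atomic_int *use)
{
	int old = atomic_load(use);

	do {
		if (old == 0)
			return false;	/* object already being freed */
	} while (!atomic_compare_exchange_weak(use, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("live:  %s\n", refcount_inc_not_zero(&live) ? "got ref" : "miss");
	printf("dying: %s\n", refcount_inc_not_zero(&dying) ? "got ref" : "miss");
	return 0;
}
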
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 12e1ab37fcd8..68e67d19724d 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -46,17 +46,19 @@ struct nfqnl_instance {
int peer_pid;
unsigned int queue_maxlen;
unsigned int copy_range;
- unsigned int queue_total;
unsigned int queue_dropped;
unsigned int queue_user_dropped;
- unsigned int id_sequence; /* 'sequence' of pkt ids */
u_int16_t queue_num; /* number of this queue */
u_int8_t copy_mode;
-
- spinlock_t lock;
-
+/*
+ * Following fields are dirtied for each queued packet,
+ * keep them in same cache line if possible.
+ */
+ spinlock_t lock;
+ unsigned int queue_total;
+ atomic_t id_sequence; /* 'sequence' of pkt ids */
struct list_head queue_list; /* packets in queue */
};
@@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
outdev = entry->outdev;
- spin_lock_bh(&queue->lock);
-
- switch ((enum nfqnl_config_mode)queue->copy_mode) {
+ switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
case NFQNL_COPY_PACKET:
if (entskb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(entskb)) {
- spin_unlock_bh(&queue->lock);
+ skb_checksum_help(entskb))
return NULL;
- }
- if (queue->copy_range == 0
- || queue->copy_range > entskb->len)
+
+ data_len = ACCESS_ONCE(queue->copy_range);
+ if (data_len == 0 || data_len > entskb->len)
data_len = entskb->len;
- else
- data_len = queue->copy_range;
size += nla_total_size(data_len);
break;
}
- entry->id = queue->id_sequence++;
-
- spin_unlock_bh(&queue->lock);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
@@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
+ entry->id = atomic_inc_return(&queue->id_sequence);
pmsg.packet_id = htonl(entry->id);
pmsg.hw_protocol = entskb->protocol;
pmsg.hook = entry->hook;
@@ -296,8 +291,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
htonl(indev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by __nf_queue */
NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
- htonl(indev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(indev)->br->dev->ifindex));
} else {
/* Case 2: indev is bridge group, we need to look for
* physical device (when called from ipv4) */
@@ -321,8 +317,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
htonl(outdev->ifindex));
/* this is the bridge group "brX" */
+ /* rcu_read_lock()ed by __nf_queue */
NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
- htonl(outdev->br_port->br->dev->ifindex));
+ htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
} else {
/* Case 2: outdev is bridge group, we need to look for
* physical output device (when called from ipv4) */
@@ -866,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v)
inst->peer_pid, inst->queue_total,
inst->copy_mode, inst->copy_range,
inst->queue_dropped, inst->queue_user_dropped,
- inst->id_sequence, 1);
+ atomic_read(&inst->id_sequence), 1);
}
static const struct seq_operations nfqnl_seq_ops = {
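The nfqnl_instance rework above groups the fields written for every queued packet and hands out packet ids from an atomic counter instead of under the queue lock. A small user-space sketch of both ideas follows; the 64-byte cache-line size and all field names are assumptions.

/* Group the per-packet "hot" fields and take ids from an atomic
 * sequence counter.  Illustrative layout only. */
#include <stdalign.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fake_queue {
	/* rarely written configuration */
	unsigned int	copy_range;
	unsigned char	copy_mode;

	/* fields dirtied for every queued packet, grouped so they tend
	 * to share one cache line (64 bytes is an assumption here) */
	alignas(64) struct {
		atomic_uint	id_sequence;
		unsigned int	queue_total;
	} hot;
};

static struct fake_queue q = { .copy_range = 4096, .copy_mode = 2 };

/* atomic increment: no lock is needed just to hand out a packet id */
static unsigned int next_packet_id(void)
{
	return atomic_fetch_add(&q.hot.id_sequence, 1) + 1;
}

int main(void)
{
	printf("first id:  %u\n", next_packet_id());
	printf("second id: %u\n", next_packet_id());
	printf("hot block offset: %zu\n", offsetof(struct fake_queue, hot));
	return 0;
}
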
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 562bf3266e04..0cb6053f02fd 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
if (info->flags & XT_CT_NOTRACK) {
- ct = &nf_conntrack_untracked;
+ ct = nf_ct_untracked_get();
atomic_inc(&ct->ct_general.use);
goto out;
}
@@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
struct nf_conn *ct = info->ct;
struct nf_conn_help *help;
- if (ct != &nf_conntrack_untracked) {
+ if (!nf_ct_is_untracked(ct)) {
help = nfct_help(ct);
if (help)
module_put(help->helper->me);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
new file mode 100644
index 000000000000..e11090a0675c
--- /dev/null
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -0,0 +1,314 @@
+/*
+ * linux/net/netfilter/xt_IDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when packet matches.
+ * After timer expires a kevent will be sent.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_IDLETIMER.h>
+#include <linux/kobject.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+
+struct idletimer_tg_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+};
+
+struct idletimer_tg {
+ struct list_head entry;
+ struct timer_list timer;
+ struct work_struct work;
+
+ struct kobject *kobj;
+ struct idletimer_tg_attr attr;
+
+ unsigned int refcnt;
+};
+
+static LIST_HEAD(idletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *idletimer_tg_kobj;
+
+static
+struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
+{
+ struct idletimer_tg *entry;
+
+ BUG_ON(!label);
+
+ list_for_each_entry(entry, &idletimer_tg_list, entry) {
+ if (!strcmp(label, entry->attr.attr.name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct idletimer_tg *timer;
+ unsigned long expires = 0;
+
+ mutex_lock(&list_mutex);
+
+ timer = __idletimer_tg_find_by_label(attr->name);
+ if (timer)
+ expires = timer->timer.expires;
+
+ mutex_unlock(&list_mutex);
+
+ if (time_after(expires, jiffies))
+ return sprintf(buf, "%u\n",
+ jiffies_to_msecs(expires - jiffies) / 1000);
+
+ return sprintf(buf, "0\n");
+}
+
+static void idletimer_tg_work(struct work_struct *work)
+{
+ struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
+ work);
+
+ sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+}
+
+static void idletimer_tg_expired(unsigned long data)
+{
+ struct idletimer_tg *timer = (struct idletimer_tg *) data;
+
+ pr_debug("timer %s expired\n", timer->attr.attr.name);
+
+ schedule_work(&timer->work);
+}
+
+static int idletimer_tg_create(struct idletimer_tg_info *info)
+{
+ int ret;
+
+ info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ if (!info->timer) {
+ pr_debug("couldn't alloc timer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+ if (!info->timer->attr.attr.name) {
+ pr_debug("couldn't alloc attribute name\n");
+ ret = -ENOMEM;
+ goto out_free_timer;
+ }
+ info->timer->attr.attr.mode = S_IRUGO;
+ info->timer->attr.show = idletimer_tg_show;
+
+ ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ if (ret < 0) {
+ pr_debug("couldn't add file to sysfs");
+ goto out_free_attr;
+ }
+
+ list_add(&info->timer->entry, &idletimer_tg_list);
+
+ setup_timer(&info->timer->timer, idletimer_tg_expired,
+ (unsigned long) info->timer);
+ info->timer->refcnt = 1;
+
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+ INIT_WORK(&info->timer->work, idletimer_tg_work);
+
+ return 0;
+
+out_free_attr:
+ kfree(info->timer->attr.attr.name);
+out_free_timer:
+ kfree(info->timer);
+out:
+ return ret;
+}
+
+/*
+ * The actual xt_tables plugin.
+ */
+static unsigned int idletimer_tg_target(struct sk_buff *skb,
+ const struct xt_action_param *par)
+{
+ const struct idletimer_tg_info *info = par->targinfo;
+
+ pr_debug("resetting timer %s, timeout period %u\n",
+ info->label, info->timeout);
+
+ BUG_ON(!info->timer);
+
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+ return XT_CONTINUE;
+}
+
+static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+ struct idletimer_tg_info *info = par->targinfo;
+ int ret;
+
+ pr_debug("checkentry targinfo%s\n", info->label);
+
+ if (info->timeout == 0) {
+ pr_debug("timeout value is zero\n");
+ return -EINVAL;
+ }
+
+ if (info->label[0] == '\0' ||
+ strnlen(info->label,
+ MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
+ pr_debug("label is empty or not nul-terminated\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list_mutex);
+
+ info->timer = __idletimer_tg_find_by_label(info->label);
+ if (info->timer) {
+ info->timer->refcnt++;
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+
+ pr_debug("increased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ } else {
+ ret = idletimer_tg_create(info);
+ if (ret < 0) {
+ pr_debug("failed to create timer\n");
+ mutex_unlock(&list_mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
+static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct idletimer_tg_info *info = par->targinfo;
+
+ pr_debug("destroy targinfo %s\n", info->label);
+
+ mutex_lock(&list_mutex);
+
+ if (--info->timer->refcnt == 0) {
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
+ del_timer_sync(&info->timer->timer);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
+ } else {
+ pr_debug("decreased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ }
+
+ mutex_unlock(&list_mutex);
+}
+
+static struct xt_target idletimer_tg __read_mostly = {
+ .name = "IDLETIMER",
+ .family = NFPROTO_UNSPEC,
+ .target = idletimer_tg_target,
+ .targetsize = sizeof(struct idletimer_tg_info),
+ .checkentry = idletimer_tg_checkentry,
+ .destroy = idletimer_tg_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct class *idletimer_tg_class;
+
+static struct device *idletimer_tg_device;
+
+static int __init idletimer_tg_init(void)
+{
+ int err;
+
+ idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
+ err = PTR_ERR(idletimer_tg_class);
+ if (IS_ERR(idletimer_tg_class)) {
+ pr_debug("couldn't register device class\n");
+ goto out;
+ }
+
+ idletimer_tg_device = device_create(idletimer_tg_class, NULL,
+ MKDEV(0, 0), NULL, "timers");
+ err = PTR_ERR(idletimer_tg_device);
+ if (IS_ERR(idletimer_tg_device)) {
+ pr_debug("couldn't register system device\n");
+ goto out_class;
+ }
+
+ idletimer_tg_kobj = &idletimer_tg_device->kobj;
+
+ err = xt_register_target(&idletimer_tg);
+ if (err < 0) {
+ pr_debug("couldn't register xt target\n");
+ goto out_dev;
+ }
+
+ return 0;
+out_dev:
+ device_destroy(idletimer_tg_class, MKDEV(0, 0));
+out_class:
+ class_destroy(idletimer_tg_class);
+out:
+ return err;
+}
+
+static void __exit idletimer_tg_exit(void)
+{
+ xt_unregister_target(&idletimer_tg);
+
+ device_destroy(idletimer_tg_class, MKDEV(0, 0));
+ class_destroy(idletimer_tg_class);
+}
+
+module_init(idletimer_tg_init);
+module_exit(idletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: idle time monitor");
+MODULE_LICENSE("GPL v2");
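The target above exposes one sysfs attribute per label: reading it returns the remaining time in seconds, and expiry is signalled through sysfs_notify(). A minimal user-space consumer could poll() that attribute, as sketched below; the label "wlan_idle" and therefore the path are assumptions, not part of this patch.

/* Minimal user-space consumer for the sysfs interface created above.
 * It assumes a rule with the (hypothetical) label "wlan_idle" already
 * exists, so the attribute path below is an assumption.  sysfs_notify()
 * shows up as POLLPRI on the open file. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/xt_idletimer/timers/wlan_idle";
	char buf[32];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLPRI;

	for (;;) {
		/* read the remaining time (seconds), then wait for expiry */
		lseek(pfd.fd, 0, SEEK_SET);
		n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("remaining: %s", buf);

		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & (POLLPRI | POLLERR))
			printf("timer expired, notification received\n");
	}
	close(pfd.fd);
	return 0;
}
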
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 512b9123252f..9d782181b6c8 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
If there is a real ct entry correspondig to this packet,
it'll hang aroun till timing out. We don't deal with it
for performance reasons. JK */
- skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 69c01e10f8af..de079abd5bc8 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -60,13 +60,22 @@ struct xt_rateest *xt_rateest_lookup(const char *name)
}
EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+static void xt_rateest_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct xt_rateest, rcu));
+}
+
void xt_rateest_put(struct xt_rateest *est)
{
mutex_lock(&xt_rateest_mutex);
if (--est->refcnt == 0) {
hlist_del(&est->list);
gen_kill_estimator(&est->bstats, &est->rstats);
- kfree(est);
+ /*
+ * gen_estimator est_timer() might access est->lock or bstats,
+ * wait a RCU grace period before freeing 'est'
+ */
+ call_rcu(&est->rcu, xt_rateest_free_rcu);
}
mutex_unlock(&xt_rateest_mutex);
}
@@ -179,6 +188,7 @@ static int __init xt_rateest_tg_init(void)
static void __exit xt_rateest_tg_fini(void)
{
xt_unregister_target(&xt_rateest_tg_reg);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 62ec021fbd50..eb81c380da1b 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -165,8 +165,8 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
rcu_read_unlock();
if (rt != NULL) {
- mtu = dst_mtu(&rt->u.dst);
- dst_release(&rt->u.dst);
+ mtu = dst_mtu(&rt->dst);
+ dst_release(&rt->dst);
}
return mtu;
}
@@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
}
#endif
-#define TH_SYN 0x02
-
/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
- tcpinfo->flg_cmp & TH_SYN &&
+ tcpinfo->flg_cmp & TCPHDR_SYN &&
!(tcpinfo->invflags & XT_TCP_INV_FLAGS))
return true;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 859d9fd429c8..22a2d421e7eb 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -77,8 +77,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
return false;
skb_dst_drop(skb);
- skb_dst_set(skb, &rt->u.dst);
- skb->dev = rt->u.dst.dev;
+ skb_dst_set(skb, &rt->dst);
+ skb->dev = rt->dst.dev;
skb->protocol = htons(ETH_P_IP);
return true;
}
@@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
#ifdef WITH_CONNTRACK
/* Avoid counting cloned packets towards the original connection. */
nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
#endif
@@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
#ifdef WITH_CONNTRACK
nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
#endif
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 30b95a1c1c89..f4af1bfafb1c 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
if (ct == NULL)
return false;
- if (ct == &nf_conntrack_untracked)
+ if (nf_ct_is_untracked(ct))
return false;
if (ct->master)
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39681f10291c..e536710ad916 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
ct = nf_ct_get(skb, &ctinfo);
- if (ct == &nf_conntrack_untracked)
- statebit = XT_CONNTRACK_STATE_UNTRACKED;
- else if (ct != NULL)
- statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
- else
+ if (ct) {
+ if (nf_ct_is_untracked(ct))
+ statebit = XT_CONNTRACK_STATE_UNTRACKED;
+ else
+ statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+ } else
statebit = XT_CONNTRACK_STATE_INVALID;
if (info->match_flags & XT_CONNTRACK_STATE) {
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index c04fcf385c59..ef36a56a02c6 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -3,6 +3,7 @@
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <net/sctp/sctp.h>
#include <linux/sctp.h>
#include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb,
++i, offset, sch->type, htons(sch->length),
sch->flags);
#endif
- offset += (ntohs(sch->length) + 3) & ~3;
+ offset += WORD_ROUND(ntohs(sch->length));
pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 3d54c236a1ba..1ca89908cbad 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
* reply packet of an established SNAT-ted connection. */
ct = nf_ct_get(skb, &ctinfo);
- if (ct && (ct != &nf_conntrack_untracked) &&
+ if (ct && !nf_ct_is_untracked(ct) &&
((iph->protocol != IPPROTO_ICMP &&
ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
(iph->protocol == IPPROTO_ICMP &&
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index e12e053d3782..a507922d80cd 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_state_info *sinfo = par->matchinfo;
enum ip_conntrack_info ctinfo;
unsigned int statebit;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- if (nf_ct_is_untracked(skb))
- statebit = XT_STATE_UNTRACKED;
- else if (!nf_ct_get(skb, &ctinfo))
+ if (!ct)
statebit = XT_STATE_INVALID;
- else
- statebit = XT_STATE_BIT(ctinfo);
-
+ else {
+ if (nf_ct_is_untracked(ct))
+ statebit = XT_STATE_UNTRACKED;
+ else
+ statebit = XT_STATE_BIT(ctinfo);
+ }
return (sinfo->statemask & statebit);
}
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 96e62b8fd6b1..42ecb71d445f 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -18,8 +18,8 @@
#include <linux/netfilter/x_tables.h>
struct xt_statistic_priv {
- uint32_t count;
-};
+ atomic_t count;
+} ____cacheline_aligned_in_smp;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
MODULE_ALIAS("ipt_statistic");
MODULE_ALIAS("ip6t_statistic");
-static DEFINE_SPINLOCK(nth_lock);
-
static bool
statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
bool ret = info->flags & XT_STATISTIC_INVERT;
+ int nval, oval;
switch (info->mode) {
case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
ret = !ret;
break;
case XT_STATISTIC_MODE_NTH:
- spin_lock_bh(&nth_lock);
- if (info->master->count++ == info->u.nth.every) {
- info->master->count = 0;
+ do {
+ oval = atomic_read(&info->master->count);
+ nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+ } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+ if (nval == 0)
ret = !ret;
- }
- spin_unlock_bh(&nth_lock);
break;
}
@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
if (info->master == NULL)
return -ENOMEM;
- info->master->count = info->u.nth.count;
+ atomic_set(&info->master->count, info->u.nth.count);
return 0;
}
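The NTH mode above replaces its spinlock with a compare-and-swap loop: read the counter, compute the wrapped successor, and retry if another CPU updated it first. A user-space sketch of the same loop with C11 atomics:

/* Lockless "match every Nth packet" counter, mirroring the cmpxchg
 * loop above.  Returns true once every (every + 1) calls, since the
 * counter wraps after reaching 'every'. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count;

static bool match_every_nth(int every)
{
	int oval, nval;

	do {
		oval = atomic_load(&count);
		nval = (oval == every) ? 0 : oval + 1;
	} while (!atomic_compare_exchange_weak(&count, &oval, nval));

	return nval == 0;
}

int main(void)
{
	for (int i = 1; i <= 8; i++)
		printf("packet %d: %s\n", i,
		       match_every_nth(3) ? "match" : "pass");
	return 0;
}
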
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a2eb965207d3..7aeaa83193db 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1323,19 +1323,23 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
- if (NULL == siocb->scm)
+ if (NULL == siocb->scm) {
siocb->scm = &scm;
+ memset(&scm, 0, sizeof(scm));
+ }
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
if (msg->msg_namelen) {
+ err = -EINVAL;
if (addr->nl_family != AF_NETLINK)
- return -EINVAL;
+ goto out;
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
+ err = -EPERM;
if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
- return -EPERM;
+ goto out;
} else {
dst_pid = nlk->dst_pid;
dst_group = nlk->dst_group;
@@ -1387,6 +1391,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
out:
+ scm_destroy(siocb->scm);
return err;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2078a277e06b..9a17f28b1253 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -83,6 +83,7 @@
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
+#include <linux/net_tstamp.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -202,6 +203,7 @@ struct packet_sock {
unsigned int tp_hdrlen;
unsigned int tp_reserve;
unsigned int tp_loss:1;
+ unsigned int tp_tstamp;
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};
@@ -656,6 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct sk_buff *copy_skb = NULL;
struct timeval tv;
struct timespec ts;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
@@ -737,7 +740,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h1->tp_snaplen = snaplen;
h.h1->tp_mac = macoff;
h.h1->tp_net = netoff;
- if (skb->tstamp.tv64)
+ if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+ && shhwtstamps->syststamp.tv64)
+ tv = ktime_to_timeval(shhwtstamps->syststamp);
+ else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+ && shhwtstamps->hwtstamp.tv64)
+ tv = ktime_to_timeval(shhwtstamps->hwtstamp);
+ else if (skb->tstamp.tv64)
tv = ktime_to_timeval(skb->tstamp);
else
do_gettimeofday(&tv);
@@ -750,7 +759,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_snaplen = snaplen;
h.h2->tp_mac = macoff;
h.h2->tp_net = netoff;
- if (skb->tstamp.tv64)
+ if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+ && shhwtstamps->syststamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->syststamp);
+ else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+ && shhwtstamps->hwtstamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->hwtstamp);
+ else if (skb->tstamp.tv64)
ts = ktime_to_timespec(skb->tstamp);
else
getnstimeofday(&ts);
@@ -2027,6 +2042,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
po->has_vnet_hdr = !!val;
return 0;
}
+ case PACKET_TIMESTAMP:
+ {
+ int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ po->tp_tstamp = val;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -2119,6 +2146,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
val = po->tp_loss;
data = &val;
break;
+ case PACKET_TIMESTAMP:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = po->tp_tstamp;
+ data = &val;
+ break;
default:
return -ENOPROTOOPT;
}
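The new PACKET_TIMESTAMP socket option above lets user space choose which SOF_TIMESTAMPING_* source fills the ring-buffer timestamps, falling back to the software stamp when the requested source is absent. A hedged user-space sketch of requesting raw hardware stamps follows; it needs CAP_NET_RAW, and whether the NIC actually provides hardware stamps is driver-dependent.

/* Ask for raw hardware timestamps on a packet socket via the new
 * PACKET_TIMESTAMP option.  Illustrative only: the kernel falls back
 * to software timestamps when the driver supplies none. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int req = SOF_TIMESTAMPING_RAW_HARDWARE;

	if (fd < 0) {
		perror("socket (need CAP_NET_RAW)");
		return 1;
	}

	if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
		       &req, sizeof(req)) < 0)
		perror("setsockopt PACKET_TIMESTAMP");
	else
		printf("ring frames will carry hardware timestamps "
		       "when available\n");

	return 0;
}
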
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index c33da6576942..b18e48fae975 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -162,6 +162,14 @@ int phonet_address_add(struct net_device *dev, u8 addr)
return err;
}
+static void phonet_device_rcu_free(struct rcu_head *head)
+{
+ struct phonet_device *pnd;
+
+ pnd = container_of(head, struct phonet_device, rcu);
+ kfree(pnd);
+}
+
int phonet_address_del(struct net_device *dev, u8 addr)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -179,10 +187,9 @@ int phonet_address_del(struct net_device *dev, u8 addr)
pnd = NULL;
mutex_unlock(&pndevs->lock);
- if (pnd) {
- synchronize_rcu();
- kfree(pnd);
- }
+ if (pnd)
+ call_rcu(&pnd->rcu, phonet_device_rcu_free);
+
return err;
}
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index f0f85b0123f7..9f1729bd60de 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -64,8 +64,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
return;
}
- peer->if_mtu = dst_mtu(&rt->u.dst);
- dst_release(&rt->u.dst);
+ peer->if_mtu = dst_mtu(&rt->dst);
+ dst_release(&rt->dst);
_leave(" [if_mtu %u]", peer->if_mtu);
}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 972378f47f3c..23b25f89e7e0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -26,6 +26,11 @@
#include <net/act_api.h>
#include <net/netlink.h>
+static void tcf_common_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct tcf_common, tcfc_rcu));
+}
+
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
@@ -38,7 +43,11 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
write_unlock_bh(hinfo->lock);
gen_kill_estimator(&p->tcfc_bstats,
&p->tcfc_rate_est);
- kfree(p);
+ /*
+ * gen_estimator est_timer() might access p->tcfc_lock
+ * or bstats, wait a RCU grace period before freeing p
+ */
+ call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
return;
}
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 50e3d945e1f4..a0593c9640db 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -127,8 +127,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0;
unsigned int off;
- if (!(skb->tc_verd & TC_OK2MUNGE)) {
- /* should we set skb->cloned? */
+ if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
return p->tcf_action;
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 654f73dff7c1..537a48732e9e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -97,6 +97,11 @@ nla_put_failure:
goto done;
}
+static void tcf_police_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct tcf_police, tcf_rcu));
+}
+
static void tcf_police_destroy(struct tcf_police *p)
{
unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
@@ -113,7 +118,11 @@ static void tcf_police_destroy(struct tcf_police *p)
qdisc_put_rtab(p->tcfp_R_tab);
if (p->tcfp_P_tab)
qdisc_put_rtab(p->tcfp_P_tab);
- kfree(p);
+ /*
+ * gen_estimator est_timer() might access p->tcf_lock
+ * or bstats, wait a RCU grace period before freeing p
+ */
+ call_rcu(&p->tcf_rcu, tcf_police_free_rcu);
return;
}
}
@@ -397,6 +406,7 @@ static void __exit
police_cleanup_module(void)
{
tcf_unregister_action(&act_police_ops);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */
}
module_init(police_init_module);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a63029ef3edd..d20fcd2a5519 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -205,7 +205,7 @@ void __qdisc_run(struct Qdisc *q)
}
}
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ qdisc_run_end(q);
}
unsigned long dev_trans_start(struct net_device *dev)
@@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev)
}
EXPORT_SYMBOL(netif_carrier_off);
+/**
+ * netif_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netif_notify_peers(struct net_device *dev)
+{
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL(netif_notify_peers);
+
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
@@ -543,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
INIT_LIST_HEAD(&sch->list);
skb_queue_head_init(&sch->q);
+ spin_lock_init(&sch->busylock);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
@@ -779,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
spin_lock_bh(root_lock);
- val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+ val = (qdisc_is_running(q) ||
test_bit(__QDISC_STATE_SCHED, &q->state));
spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0b52b8de562c..4be8d04b262d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1550,7 +1550,6 @@ static const struct Qdisc_class_ops htb_class_ops = {
};
static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
- .next = NULL,
.cl_ops = &htb_class_ops,
.id = "htb",
.priv_size = sizeof(struct htb_sched),
@@ -1561,7 +1560,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.init = htb_init,
.reset = htb_reset,
.destroy = htb_destroy,
- .change = NULL /* htb_change */,
.dump = htb_dump,
.owner = THIS_MODULE,
};
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 182749867c72..a0e1a7fdebbf 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -490,7 +490,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
__func__, &fl.fl4_dst, &fl.fl4_src);
if (!ip_route_output_key(&init_net, &rt, &fl)) {
- dst = &rt->u.dst;
+ dst = &rt->dst;
}
/* If there is no association or if a source address is passed, no
@@ -534,7 +534,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
fl.fl_ip_sport = laddr->a.v4.sin_port;
if (!ip_route_output_key(&init_net, &rt, &fl)) {
- dst = &rt->u.dst;
+ dst = &rt->dst;
goto out_unlock;
}
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index bd2a50b482ac..246f92924658 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1817,7 +1817,7 @@ malformed:
struct __sctp_missing {
__be32 num_missing;
__be16 type;
-} __attribute__((packed));
+} __packed;
/*
* Report a missing mandatory parameter.
diff --git a/net/socket.c b/net/socket.c
index 367d5477d00f..acfa1738663d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -124,7 +124,7 @@ static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_sendpage(struct file *file, struct page *page,
int offset, size_t size, loff_t *ppos, int more);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
+ struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
/*
@@ -162,7 +162,7 @@ static const struct net_proto_family *net_families[NPROTO] __read_mostly;
* Statistics counters of the socket lists
*/
-static DEFINE_PER_CPU(int, sockets_in_use) = 0;
+static DEFINE_PER_CPU(int, sockets_in_use);
/*
* Support routines.
@@ -309,9 +309,9 @@ static int init_inodecache(void)
}
static const struct super_operations sockfs_ops = {
- .alloc_inode = sock_alloc_inode,
- .destroy_inode =sock_destroy_inode,
- .statfs = simple_statfs,
+ .alloc_inode = sock_alloc_inode,
+ .destroy_inode = sock_destroy_inode,
+ .statfs = simple_statfs,
};
static int sockfs_get_sb(struct file_system_type *fs_type,
@@ -411,6 +411,7 @@ int sock_map_fd(struct socket *sock, int flags)
return fd;
}
+EXPORT_SYMBOL(sock_map_fd);
static struct socket *sock_from_file(struct file *file, int *err)
{
@@ -422,7 +423,7 @@ static struct socket *sock_from_file(struct file *file, int *err)
}
/**
- * sockfd_lookup - Go from a file number to its socket slot
+ * sockfd_lookup - Go from a file number to its socket slot
* @fd: file handle
* @err: pointer to an error code return
*
@@ -450,6 +451,7 @@ struct socket *sockfd_lookup(int fd, int *err)
fput(file);
return sock;
}
+EXPORT_SYMBOL(sockfd_lookup);
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
@@ -540,6 +542,7 @@ void sock_release(struct socket *sock)
}
sock->file = NULL;
}
+EXPORT_SYMBOL(sock_release);
int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
union skb_shared_tx *shtx)
@@ -586,6 +589,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
ret = wait_on_sync_kiocb(&iocb);
return ret;
}
+EXPORT_SYMBOL(sock_sendmsg);
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
@@ -604,6 +608,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
set_fs(oldfs);
return result;
}
+EXPORT_SYMBOL(kernel_sendmsg);
static int ktime2ts(ktime_t kt, struct timespec *ts)
{
@@ -664,7 +669,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(ts), &ts);
}
-
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -720,6 +724,7 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg,
ret = wait_on_sync_kiocb(&iocb);
return ret;
}
+EXPORT_SYMBOL(sock_recvmsg);
static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
@@ -752,6 +757,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
set_fs(oldfs);
return result;
}
+EXPORT_SYMBOL(kernel_recvmsg);
static void sock_aio_dtor(struct kiocb *iocb)
{
@@ -774,7 +780,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
}
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
+ struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct socket *sock = file->private_data;
@@ -887,7 +893,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
*/
static DEFINE_MUTEX(br_ioctl_mutex);
-static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);
void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
{
@@ -895,7 +901,6 @@ void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
br_ioctl_hook = hook;
mutex_unlock(&br_ioctl_mutex);
}
-
EXPORT_SYMBOL(brioctl_set);
static DEFINE_MUTEX(vlan_ioctl_mutex);
@@ -907,7 +912,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
vlan_ioctl_hook = hook;
mutex_unlock(&vlan_ioctl_mutex);
}
-
EXPORT_SYMBOL(vlan_ioctl_set);
static DEFINE_MUTEX(dlci_ioctl_mutex);
@@ -919,7 +923,6 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
dlci_ioctl_hook = hook;
mutex_unlock(&dlci_ioctl_mutex);
}
-
EXPORT_SYMBOL(dlci_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
@@ -1047,6 +1050,7 @@ out_release:
sock = NULL;
goto out;
}
+EXPORT_SYMBOL(sock_create_lite);
/* No kernel lock held - perfect */
static unsigned int sock_poll(struct file *file, poll_table *wait)
@@ -1147,6 +1151,7 @@ call_kill:
rcu_read_unlock();
return 0;
}
+EXPORT_SYMBOL(sock_wake_async);
static int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
@@ -1265,11 +1270,13 @@ int sock_create(int family, int type, int protocol, struct socket **res)
{
return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
}
+EXPORT_SYMBOL(sock_create);
int sock_create_kern(int family, int type, int protocol, struct socket **res)
{
return __sock_create(&init_net, family, type, protocol, res, 1);
}
+EXPORT_SYMBOL(sock_create_kern);
SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
{
@@ -1474,7 +1481,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
goto out;
err = -ENFILE;
- if (!(newsock = sock_alloc()))
+ newsock = sock_alloc();
+ if (!newsock)
goto out_put;
newsock->type = sock->type;
@@ -1861,8 +1869,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(&msg_sys, msg_compat))
return -EFAULT;
- }
- else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+ } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -1964,8 +1971,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
if (MSG_CMSG_COMPAT & flags) {
if (get_compat_msghdr(msg_sys, msg_compat))
return -EFAULT;
- }
- else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+ } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
return -EFAULT;
err = -EMSGSIZE;
@@ -2191,10 +2197,10 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(unsigned long))
static const unsigned char nargs[20] = {
- AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
- AL(4),AL(5)
+ AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
+ AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
+ AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
+ AL(4), AL(5)
};
#undef AL
@@ -2340,6 +2346,7 @@ int sock_register(const struct net_proto_family *ops)
printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
return err;
}
+EXPORT_SYMBOL(sock_register);
/**
* sock_unregister - remove a protocol handler
@@ -2366,6 +2373,7 @@ void sock_unregister(int family)
printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
}
+EXPORT_SYMBOL(sock_unregister);
static int __init sock_init(void)
{
@@ -2490,13 +2498,13 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
ifc.ifc_req = NULL;
uifc = compat_alloc_user_space(sizeof(struct ifconf));
} else {
- size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) *
- sizeof (struct ifreq);
+ size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
+ sizeof(struct ifreq);
uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
ifc.ifc_len = len;
ifr = ifc.ifc_req = (void __user *)(uifc + 1);
ifr32 = compat_ptr(ifc32.ifcbuf);
- for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) {
+ for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
return -EFAULT;
ifr++;
@@ -2516,9 +2524,9 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
ifr = ifc.ifc_req;
ifr32 = compat_ptr(ifc32.ifcbuf);
for (i = 0, j = 0;
- i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
- i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) {
- if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq)))
+ i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
+ i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
+ if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
return -EFAULT;
ifr32++;
ifr++;
@@ -2567,7 +2575,7 @@ static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32
compat_uptr_t uptr32;
struct ifreq __user *uifr;
- uifr = compat_alloc_user_space(sizeof (*uifr));
+ uifr = compat_alloc_user_space(sizeof(*uifr));
if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
return -EFAULT;
@@ -2601,9 +2609,9 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
return -EFAULT;
old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
err = dev_ioctl(net, cmd, &kifr);
- set_fs (old_fs);
+ set_fs(old_fs);
return err;
case SIOCBONDSLAVEINFOQUERY:
@@ -2710,9 +2718,9 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
return -EFAULT;
old_fs = get_fs();
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
err = dev_ioctl(net, cmd, (void __user *)&ifr);
- set_fs (old_fs);
+ set_fs(old_fs);
if (cmd == SIOCGIFMAP && !err) {
err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
@@ -2734,7 +2742,7 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif
compat_uptr_t uptr32;
struct ifreq __user *uifr;
- uifr = compat_alloc_user_space(sizeof (*uifr));
+ uifr = compat_alloc_user_space(sizeof(*uifr));
if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
return -EFAULT;
@@ -2750,20 +2758,20 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif
}
struct rtentry32 {
- u32 rt_pad1;
+ u32 rt_pad1;
struct sockaddr rt_dst; /* target address */
struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
struct sockaddr rt_genmask; /* target network mask (IP) */
- unsigned short rt_flags;
- short rt_pad2;
- u32 rt_pad3;
- unsigned char rt_tos;
- unsigned char rt_class;
- short rt_pad4;
- short rt_metric; /* +1 for binary compatibility! */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
/* char * */ u32 rt_dev; /* forcing the device at add */
- u32 rt_mtu; /* per route MTU/Window */
- u32 rt_window; /* Window clamping */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
unsigned short rt_irtt; /* Initial RTT */
};
@@ -2793,29 +2801,29 @@ static int routing_ioctl(struct net *net, struct socket *sock,
if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
struct in6_rtmsg32 __user *ur6 = argp;
- ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
+ ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
3 * sizeof(struct in6_addr));
- ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
- ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
- ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
- ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
- ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
- ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
- ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+ ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
+ ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+ ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+ ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
+ ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
+ ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
+ ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
r = (void *) &r6;
} else { /* ipv4 */
struct rtentry32 __user *ur4 = argp;
- ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
+ ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
3 * sizeof(struct sockaddr));
- ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
- ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
- ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
- ret |= __get_user (r4.rt_window, &(ur4->rt_window));
- ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
- ret |= __get_user (rtdev, &(ur4->rt_dev));
+ ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
+ ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
+ ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
+ ret |= __get_user(r4.rt_window, &(ur4->rt_window));
+ ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
+ ret |= __get_user(rtdev, &(ur4->rt_dev));
if (rtdev) {
- ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
+ ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
r4.rt_dev = devname; devname[15] = 0;
} else
r4.rt_dev = NULL;
@@ -2828,9 +2836,9 @@ static int routing_ioctl(struct net *net, struct socket *sock,
goto out;
}
- set_fs (KERNEL_DS);
+ set_fs(KERNEL_DS);
ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
- set_fs (old_fs);
+ set_fs(old_fs);
out:
return ret;
@@ -2993,11 +3001,13 @@ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
return sock->ops->bind(sock, addr, addrlen);
}
+EXPORT_SYMBOL(kernel_bind);
int kernel_listen(struct socket *sock, int backlog)
{
return sock->ops->listen(sock, backlog);
}
+EXPORT_SYMBOL(kernel_listen);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
{
@@ -3022,24 +3032,28 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
done:
return err;
}
+EXPORT_SYMBOL(kernel_accept);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags)
{
return sock->ops->connect(sock, addr, addrlen, flags);
}
+EXPORT_SYMBOL(kernel_connect);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
int *addrlen)
{
return sock->ops->getname(sock, addr, addrlen, 0);
}
+EXPORT_SYMBOL(kernel_getsockname);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
int *addrlen)
{
return sock->ops->getname(sock, addr, addrlen, 1);
}
+EXPORT_SYMBOL(kernel_getpeername);
int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
@@ -3056,6 +3070,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
set_fs(oldfs);
return err;
}
+EXPORT_SYMBOL(kernel_getsockopt);
int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen)
@@ -3072,6 +3087,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
set_fs(oldfs);
return err;
}
+EXPORT_SYMBOL(kernel_setsockopt);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
@@ -3083,6 +3099,7 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
return sock_no_sendpage(sock, page, offset, size, flags);
}
+EXPORT_SYMBOL(kernel_sendpage);
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
{
@@ -3095,33 +3112,10 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
return err;
}
+EXPORT_SYMBOL(kernel_sock_ioctl);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
{
return sock->ops->shutdown(sock, how);
}
-
-EXPORT_SYMBOL(sock_create);
-EXPORT_SYMBOL(sock_create_kern);
-EXPORT_SYMBOL(sock_create_lite);
-EXPORT_SYMBOL(sock_map_fd);
-EXPORT_SYMBOL(sock_recvmsg);
-EXPORT_SYMBOL(sock_register);
-EXPORT_SYMBOL(sock_release);
-EXPORT_SYMBOL(sock_sendmsg);
-EXPORT_SYMBOL(sock_unregister);
-EXPORT_SYMBOL(sock_wake_async);
-EXPORT_SYMBOL(sockfd_lookup);
-EXPORT_SYMBOL(kernel_sendmsg);
-EXPORT_SYMBOL(kernel_recvmsg);
-EXPORT_SYMBOL(kernel_bind);
-EXPORT_SYMBOL(kernel_listen);
-EXPORT_SYMBOL(kernel_accept);
-EXPORT_SYMBOL(kernel_connect);
-EXPORT_SYMBOL(kernel_getsockname);
-EXPORT_SYMBOL(kernel_getpeername);
-EXPORT_SYMBOL(kernel_getsockopt);
-EXPORT_SYMBOL(kernel_setsockopt);
-EXPORT_SYMBOL(kernel_sendpage);
-EXPORT_SYMBOL(kernel_sock_ioctl);
EXPORT_SYMBOL(kernel_sock_shutdown);
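
The net/socket.c hunks above are a checkpatch-style cleanup: spacing and brace fixes, dropping redundant "= 0" / "= NULL" initializers of static storage, and moving each EXPORT_SYMBOL() directly after the function it exports instead of keeping them in one block at the end of the file. A minimal sketch of an in-kernel caller of these exported helpers, using only the signatures visible in the hunks above; the demo_send() wrapper and its UDP example are illustrative assumptions, not part of the patch:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/in.h>

/* Send one datagram from kernel context using the exported helpers. */
static int demo_send(struct sockaddr_in *daddr, void *buf, size_t len)
{
	struct socket *sock;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	int err;

	err = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return err;

	err = kernel_connect(sock, (struct sockaddr *)daddr,
			     sizeof(*daddr), 0);
	if (!err)
		err = kernel_sendmsg(sock, &msg, &vec, 1, len);

	sock_release(sock);
	return err;
}

Because the exports themselves are unchanged (only relocated), such callers build and link exactly as before.
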
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fef2cc5e9d2b..75ba48b0d12a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -282,7 +282,7 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
return s;
}
-static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
+static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
struct hlist_node *node;
@@ -292,9 +292,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->dentry;
- if (!net_eq(sock_net(s), net))
- continue;
-
if (dentry && dentry->d_inode == i) {
sock_hold(s);
goto found;
@@ -450,11 +447,31 @@ static int unix_release_sock(struct sock *sk, int embrion)
return 0;
}
+static void init_peercred(struct sock *sk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+}
+
+static void copy_peercred(struct sock *sk, struct sock *peersk)
+{
+ put_pid(sk->sk_peer_pid);
+ if (sk->sk_peer_cred)
+ put_cred(sk->sk_peer_cred);
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+}
+
static int unix_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
+ struct pid *old_pid = NULL;
+ const struct cred *old_cred = NULL;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -470,12 +487,14 @@ static int unix_listen(struct socket *sock, int backlog)
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
- sk->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
+ init_peercred(sk);
err = 0;
out_unlock:
unix_state_unlock(sk);
+ put_pid(old_pid);
+ if (old_cred)
+ put_cred(old_cred);
out:
return err;
}
@@ -736,7 +755,7 @@ static struct sock *unix_find_other(struct net *net,
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
- u = unix_find_socket_byinode(net, inode);
+ u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
@@ -1140,8 +1159,7 @@ restart:
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
- newsk->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
+ init_peercred(newsk);
newu = unix_sk(newsk);
newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
@@ -1157,7 +1175,7 @@ restart:
}
/* Set credentials */
- sk->sk_peercred = other->sk_peercred;
+ copy_peercred(sk, other);
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
@@ -1199,10 +1217,8 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
sock_hold(skb);
unix_peer(ska) = skb;
unix_peer(skb) = ska;
- ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
- current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
- ska->sk_peercred.uid = skb->sk_peercred.uid;
- ska->sk_peercred.gid = skb->sk_peercred.gid;
+ init_peercred(ska);
+ init_peercred(skb);
if (ska->sk_type != SOCK_DGRAM) {
ska->sk_state = TCP_ESTABLISHED;
@@ -1297,18 +1313,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
int i;
scm->fp = UNIXCB(skb).fp;
- skb->destructor = sock_wfree;
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
-static void unix_destruct_fds(struct sk_buff *skb)
+static void unix_destruct_scm(struct sk_buff *skb)
{
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
- unix_detach_fds(&scm, skb);
+ scm.pid = UNIXCB(skb).pid;
+ scm.cred = UNIXCB(skb).cred;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
/* Alas, it calls VFS */
/* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1331,10 +1349,22 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
for (i = scm->fp->count-1; i >= 0; i--)
unix_inflight(scm->fp->fp[i]);
- skb->destructor = unix_destruct_fds;
return 0;
}
+static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+{
+ int err = 0;
+ UNIXCB(skb).pid = get_pid(scm->pid);
+ UNIXCB(skb).cred = get_cred(scm->cred);
+ UNIXCB(skb).fp = NULL;
+ if (scm->fp && send_fds)
+ err = unix_attach_fds(scm, skb);
+
+ skb->destructor = unix_destruct_scm;
+ return err;
+}
+
/*
* Send AF_UNIX data.
*/
@@ -1391,12 +1421,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (skb == NULL)
goto out;
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp) {
- err = unix_attach_fds(siocb->scm, skb);
- if (err)
- goto out_free;
- }
+ err = unix_scm_to_skb(siocb->scm, skb, true);
+ if (err)
+ goto out_free;
unix_get_secdata(siocb->scm, skb);
skb_reset_transport_header(skb);
@@ -1566,16 +1593,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
*/
size = min_t(int, size, skb_tailroom(skb));
- memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
+
/* Only send the fds in the first buffer */
- if (siocb->scm->fp && !fds_sent) {
- err = unix_attach_fds(siocb->scm, skb);
- if (err) {
- kfree_skb(skb);
- goto out_err;
- }
- fds_sent = true;
+ err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
}
+ fds_sent = true;
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
@@ -1692,7 +1717,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
unix_set_secdata(siocb->scm, skb);
if (!(flags & MSG_PEEK)) {
@@ -1841,14 +1866,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (check_creds) {
/* Never glue messages from different writers */
- if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
- sizeof(siocb->scm->creds)) != 0) {
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ (UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
}
} else {
/* Copy credentials */
- siocb->scm->creds = *UNIXCREDS(skb);
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
check_creds = 1;
}
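
The net/unix/af_unix.c hunks replace the old struct ucred copies (sk_peercred, UNIXCREDS(skb)) with reference-counted struct pid and struct cred pointers: init_peercred()/copy_peercred() set them on listen, connect and socketpair, unix_scm_to_skb() attaches them to every outgoing skb, and unix_destruct_scm() drops the references (and any passed fds) when the skb is freed. For context, a hedged user-space sketch of the credential passing this plumbing serves, using the standard SO_PASSCRED / SCM_CREDENTIALS interface; this is illustrative and not code from the patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive one message on an AF_UNIX socket and print the sender's creds. */
static void recv_with_creds(int fd)
{
	char data[256];
	union {
		char buf[CMSG_SPACE(sizeof(struct ucred))];
		struct cmsghdr align;
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	/* ask the kernel to attach the sender's pid/uid/gid */
	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			struct ucred uc;

			memcpy(&uc, CMSG_DATA(cmsg), sizeof(uc));
			printf("peer pid=%d uid=%d gid=%d\n",
			       (int)uc.pid, (int)uc.uid, (int)uc.gid);
		}
	}
}
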
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b01a6f6397d7..d0c92dddb26b 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -35,8 +35,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
if (!ht_cap->ht_supported)
return NULL;
- if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
- ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
+ if (channel_type != NL80211_CHAN_HT20 &&
+ (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
+ ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
return NULL;
}
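
The chan.c change lets rdev_freq_to_chan() accept an HT20 channel even when the peer's HT capabilities rule out 40 MHz operation; only the HT40 variants still require IEEE80211_HT_CAP_SUP_WIDTH_20_40 and the absence of IEEE80211_HT_CAP_40MHZ_INTOLERANT. After ht_supported has already been verified, the check boils down to the predicate below; a sketch, with only the IEEE80211_HT_CAP_* flag names taken from the hunk:

static bool ht_channel_type_allowed(u16 cap, enum nl80211_channel_type ct)
{
	if (ct == NL80211_CHAN_HT20)
		return true;	/* 20 MHz HT needs no 40 MHz support */

	/* HT40+/HT40-: peer must support 40 MHz and not be intolerant */
	return (cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
	       !(cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
}
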
diff --git a/net/wireless/core.h b/net/wireless/core.h
index ae930acf75e9..63d57ae399c3 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -339,6 +339,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
const u8 *buf, size_t len, u64 *cookie);
/* SME */
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 48ead6f0426d..9f95354f859f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -827,6 +827,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
+ bool channel_type_valid,
const u8 *buf, size_t len, u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -845,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
if (!wdev->current_bss ||
memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
ETH_ALEN) != 0 ||
- memcmp(wdev->current_bss->pub.bssid, mgmt->da,
- ETH_ALEN) != 0)
+ (wdev->iftype == NL80211_IFTYPE_STATION &&
+ memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+ ETH_ALEN) != 0))
return -ENOTCONN;
}
@@ -855,7 +857,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
/* Transmit the Action frame as requested by user space */
return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
- buf, len, cookie);
+ channel_type_valid, buf, len, cookie);
}
bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
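
The core.h and mlme.c hunks thread a new channel_type_valid flag from nl80211 down to the driver's action callback, so a driver can distinguish "user space asked for NL80211_CHAN_NO_HT" from "no channel type attribute was supplied at all" (the same hunks also relax the destination-address check so non-station interfaces can transmit Action frames). A sketch of a driver-side handler consuming the flag; the parameter list mirrors the rdev->ops->action() call site above, while the body and the demo_ name are illustrative assumptions:

static int demo_action(struct wiphy *wiphy, struct net_device *dev,
		       struct ieee80211_channel *chan,
		       enum nl80211_channel_type channel_type,
		       bool channel_type_valid,
		       const u8 *buf, size_t len, u64 *cookie)
{
	/*
	 * Only honour channel_type if user space actually supplied
	 * NL80211_ATTR_WIPHY_CHANNEL_TYPE; otherwise fall back to a
	 * plain 20 MHz (non-HT) transmission.
	 */
	if (!channel_type_valid)
		channel_type = NL80211_CHAN_NO_HT;

	/* ... queue the Action frame for transmission on chan ... */
	*cookie = (unsigned long)buf;	/* placeholder cookie */
	return 0;
}
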
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index db71150b8040..6b41d15c4a05 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1107,7 +1107,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype)
{
if (!use_4addr) {
- if (netdev && netdev->br_port)
+ if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
return -EBUSY;
return 0;
}
@@ -3955,6 +3955,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
}
}
+ if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+ u8 *rates =
+ nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ int n_rates =
+ nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ struct ieee80211_supported_band *sband =
+ wiphy->bands[ibss.channel->band];
+ int i, j;
+
+ if (n_rates == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < n_rates; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ bool found = false;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].bitrate == rate) {
+ found = true;
+ ibss.basic_rates |= BIT(j);
+ break;
+ }
+ }
+ if (!found) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ } else {
+ /*
+ * If no rates were explicitly configured,
+ * use the mandatory rate set for 11b or
+ * 11a for maximum compatibility.
+ */
+ struct ieee80211_supported_band *sband =
+ wiphy->bands[ibss.channel->band];
+ int j;
+ u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ?
+ IEEE80211_RATE_MANDATORY_A :
+ IEEE80211_RATE_MANDATORY_B;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].flags & flag)
+ ibss.basic_rates |= BIT(j);
+ }
+ }
+
err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
out:
@@ -4653,7 +4702,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
if (err)
goto unlock_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4681,6 +4731,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
struct net_device *dev;
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ bool channel_type_valid = false;
u32 freq;
int err;
void *hdr;
@@ -4702,7 +4753,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
err = -EOPNOTSUPP;
goto out;
}
@@ -4722,6 +4774,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto out;
}
+ channel_type_valid = true;
}
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -4745,6 +4798,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
goto free_msg;
}
err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
+ channel_type_valid,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
&cookie);
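
The IBSS hunk above maps NL80211_ATTR_BSS_BASIC_RATES onto the band's bitrate table, falling back to the mandatory 11a/11b rate set when the attribute is absent. The (rates[i] & 0x7f) * 5 expression is a unit conversion: the attribute uses the 802.11 supported-rates encoding (units of 500 kbps, with bit 7 carrying the "basic rate" flag in information elements), while struct ieee80211_rate.bitrate is in units of 100 kbps. A one-line sketch of that conversion:

static inline int ie_rate_to_bitrate(u8 ie_rate)
{
	/* e.g. 0x82 (basic 1 Mb/s) -> 10, i.e. 10 x 100 kbps */
	return (ie_rate & 0x7f) * 5;
}
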
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3416373a9c0c..0c8a1e8b7690 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -770,8 +770,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EOPNOTSUPP;
/* if it's part of a bridge, reject changing type to station/ibss */
- if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
- ntype == NL80211_IFTYPE_STATION))
+ if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+ (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
return -EBUSY;
if (ntype != otype) {